/*
 * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
 * Copyright(c) 2009 Intel Corporation
 *
 * based on raid6recov.c:
 *   Copyright 2002 H. Peter Anvin
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
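
/*
 * Background for the helpers below (a summary of the approach inherited
 * from raid6recov.c): with the GF(256) generator g, P is the xor of the
 * data blocks and Q is the sum of g^i * D_i.  When data blocks x and y are
 * lost, the partial syndromes Pxy and Qxy are computed with zeroes
 * substituted for the missing blocks, and the lost data is recovered as
 *
 *	Dx = A*(P+Pxy) + B*(Q+Qxy)
 *	Dy = (P+Pxy) + Dx
 *
 * where the constants A and B are looked up via the raid6_gfexp,
 * raid6_gfexi and raid6_gfinv tables declared in <linux/raid/pq.h>.
 */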

static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
                  size_t len, struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &dest, 1, srcs, 2, len);
        struct dma_device *dma = chan ? chan->device : NULL;
        const u8 *amul, *bmul;
        u8 ax, bx;
        u8 *a, *b, *c;

        if (dma) {
                dma_addr_t dma_dest[2];
                dma_addr_t dma_src[2];
                struct device *dev = dma->dev;
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

                dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
                dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
                dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
                tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
                                             len, dma_flags);
                if (tx) {
                        async_tx_submit(chan, tx, submit);
                        return tx;
                }
        }

        /* run the operation synchronously */
        async_tx_quiesce(&submit->depend_tx);
        amul = raid6_gfmul[coef[0]];
        bmul = raid6_gfmul[coef[1]];
        a = page_address(srcs[0]);
        b = page_address(srcs[1]);
        c = page_address(dest);

        while (len--) {
                ax    = amul[*a++];
                bx    = bmul[*b++];
                *c++ = ax ^ bx;
        }

        return NULL;
}

static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
           struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &dest, 1, &src, 1, len);
        struct dma_device *dma = chan ? chan->device : NULL;
        const u8 *qmul; /* Q multiplier table */
        u8 *d, *s;

        if (dma) {
                dma_addr_t dma_dest[2];
                dma_addr_t dma_src[1];
                struct device *dev = dma->dev;
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

                dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
                dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
                tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
                                             len, dma_flags);
                if (tx) {
                        async_tx_submit(chan, tx, submit);
                        return tx;
                }
        }

        /* no channel available, or failed to allocate a descriptor, so
         * perform the operation synchronously
         */
        async_tx_quiesce(&submit->depend_tx);
        qmul = raid6_gfmul[coef];
        d = page_address(dest);
        s = page_address(src);

        while (len--)
                *d++ = qmul[*s++];

        return NULL;
}

static struct dma_async_tx_descriptor *
__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
                struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *a, *b;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;

        p = blocks[4-2];
        q = blocks[4-1];

        a = blocks[faila];
        b = blocks[failb];

        /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = p;
        srcs[1] = q;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_sum_product(b, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = p;
        srcs[1] = b;
        init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(a, srcs, 0, 2, bytes, submit);

        return tx;
}
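
/*
 * Note on chaining: each step above and below feeds the descriptor returned
 * by the previous call back into init_async_submit() as its dependency
 * (submit->depend_tx), so the copy, multiply, xor and syndrome operations
 * complete in order whether they are offloaded to a dma channel or executed
 * synchronously.
 */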

static struct dma_async_tx_descriptor *
__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
                struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *g, *dp, *dq;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
        int uninitialized_var(good);
        int i;

        /* find the single surviving data disk */
        for (i = 0; i < 3; i++) {
                if (i == faila || i == failb)
                        continue;
                else {
                        good = i;
                        break;
                }
        }
        BUG_ON(i >= 3);

        p = blocks[5-2];
        q = blocks[5-1];
        g = blocks[good];

        /* Compute syndrome with zero for the missing data pages
         * Use the dead data pages as temporary storage for delta p and
         * delta q
         */
        dp = blocks[faila];
        dq = blocks[failb];

        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_memcpy(dp, g, 0, 0, bytes, submit);
        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);

        /* compute P + Pxy */
        srcs[0] = dp;
        srcs[1] = p;
        init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL,
                          scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        /* compute Q + Qxy */
        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL,
                          scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = dp;
        srcs[1] = dq;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_sum_product(dq, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = dp;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        return tx;
}
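
/*
 * In the 5 disk case above the partial syndromes are built directly from
 * the lone surviving data block 'g': delta p starts as a plain copy of g
 * and delta q as g scaled by g^{good} via async_mult(), sidestepping the
 * single-source pq operation that not all dma engines support.
 */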

static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
                struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *dp, *dq;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;

        p = blocks[disks-2];
        q = blocks[disks-1];

        /* Compute syndrome with zero for the missing data pages
         * Use the dead data pages as temporary storage for
         * delta p and delta q
         */
        dp = blocks[faila];
        blocks[faila] = (void *)raid6_empty_zero_page;
        blocks[disks-2] = dp;
        dq = blocks[failb];
        blocks[failb] = (void *)raid6_empty_zero_page;
        blocks[disks-1] = dq;

        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);

        /* Restore pointer table */
        blocks[faila]   = dp;
        blocks[failb]   = dq;
        blocks[disks-2] = p;
        blocks[disks-1] = q;

        /* compute P + Pxy */
        srcs[0] = dp;
        srcs[1] = p;
        init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL,
                          scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        /* compute Q + Qxy */
        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL,
                          scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = dp;
        srcs[1] = dq;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_sum_product(dq, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = dp;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        return tx;
}

/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: first failed drive index
 * @failb: second failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                        struct page **blocks, struct async_submit_ctl *submit)
{
        BUG_ON(faila == failb);
        if (failb < faila)
                swap(faila, failb);

        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

        /* we need to preserve the contents of 'blocks' for the async
         * case, so punt to synchronous if a scribble buffer is not available
         */
        if (!submit->scribble) {
                void **ptrs = (void **) blocks;
                int i;

                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
                        ptrs[i] = page_address(blocks[i]);

                raid6_2data_recov(disks, bytes, faila, failb, ptrs);

                async_tx_sync_epilog(submit);

                return NULL;
        }

        switch (disks) {
        case 4:
                /* dma devices do not uniformly understand a zero source pq
                 * operation (in contrast to the synchronous case), so
                 * explicitly handle the 4 disk special case
                 */
                return __2data_recov_4(bytes, faila, failb, blocks, submit);
        case 5:
                /* dma devices do not uniformly understand a single
                 * source pq operation (in contrast to the synchronous
                 * case), so explicitly handle the 5 disk special case
                 */
                return __2data_recov_5(bytes, faila, failb, blocks, submit);
        default:
                return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
        }
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
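
/*
 * Example usage (an illustrative sketch; 'stripe_done', 'ctx', 'blocks',
 * 'scribble', 'faila' and 'failb' are hypothetical caller-side names):
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, stripe_done, ctx, scribble);
 *	tx = async_raid6_2data_recov(disks, PAGE_SIZE, faila, failb,
 *				     blocks, &submit);
 *
 * 'blocks' holds the disks-2 data pages followed by p and q.  A non-NULL
 * 'scribble' scratch buffer is required for the asynchronous path; without
 * it the routine falls back to the synchronous raid6_2data_recov().
 */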

/**
 * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
                        struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *dq;
        u8 coef;
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
        struct page *srcs[2];

        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

        /* we need to preserve the contents of 'blocks' for the async
         * case, so punt to synchronous if a scribble buffer is not available
         */
        if (!scribble) {
                void **ptrs = (void **) blocks;
                int i;

                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
                        ptrs[i] = page_address(blocks[i]);

                raid6_datap_recov(disks, bytes, faila, ptrs);

                async_tx_sync_epilog(submit);

                return NULL;
        }

        p = blocks[disks-2];
        q = blocks[disks-1];

        /* Compute syndrome with zero for the missing data page
         * Use the dead data page as temporary storage for delta q
         */
        dq = blocks[faila];
        blocks[faila] = (void *)raid6_empty_zero_page;
        blocks[disks-1] = dq;

        /* in the 4 disk case we only need to perform a single source
         * multiplication
         */
        if (disks == 4) {
                int good = faila == 0 ? 1 : 0;
                struct page *g = blocks[good];

                init_async_submit(submit, 0, tx, NULL, NULL, scribble);
                tx = async_memcpy(p, g, 0, 0, bytes, submit);

                init_async_submit(submit, 0, tx, NULL, NULL, scribble);
                tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
        } else {
                init_async_submit(submit, 0, tx, NULL, NULL, scribble);
                tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
        }

        /* Restore pointer table */
        blocks[faila]   = dq;
        blocks[disks-1] = q;

        /* calculate g^{-faila} */
        coef = raid6_gfinv[raid6_gfexp[faila]];

        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL,
                          scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        init_async_submit(submit, 0, tx, NULL, NULL, scribble);
        tx = async_mult(dq, dq, coef, bytes, submit);

        srcs[0] = p;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(p, srcs, 0, 2, bytes, submit);

        return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
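
/*
 * For reference, the recovery performed by async_raid6_datap_recov() is a
 * compact restatement of the steps above:
 *
 *	Dx = g^{-x} * (Q + Qx)
 *	P  = Px + Dx
 *
 * where Px and Qx are the syndromes computed over the surviving data blocks
 * with zero substituted for the missing block x = faila.
 */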

MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");