[XFS] implement generic xfs_btree_delete/delrec
[safe/jmp/linux-2.6] / fs / xfs / xfs_alloc_btree.c
1 /*
2  * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_btree_trace.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_alloc.h"
41 #include "xfs_error.h"
42
43
44 /*
45  * Get the data from the pointed-to record.
46  */
47 int                                     /* error */
48 xfs_alloc_get_rec(
49         xfs_btree_cur_t         *cur,   /* btree cursor */
50         xfs_agblock_t           *bno,   /* output: starting block of extent */
51         xfs_extlen_t            *len,   /* output: length of extent */
52         int                     *stat)  /* output: success/failure */
53 {
54         xfs_alloc_block_t       *block; /* btree block */
55 #ifdef DEBUG
56         int                     error;  /* error return value */
57 #endif
58         int                     ptr;    /* record number */
59
60         ptr = cur->bc_ptrs[0];
61         block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
62 #ifdef DEBUG
63         if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
64                 return error;
65 #endif
66         /*
67          * Off the right end or left end, return failure.
68          */
69         if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
70                 *stat = 0;
71                 return 0;
72         }
73         /*
74          * Point to the record and extract its data.
75          */
76         {
77                 xfs_alloc_rec_t         *rec;   /* record data */
78
79                 rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
80                 *bno = be32_to_cpu(rec->ar_startblock);
81                 *len = be32_to_cpu(rec->ar_blockcount);
82         }
83         *stat = 1;
84         return 0;
85 }
86
87
88 STATIC struct xfs_btree_cur *
89 xfs_allocbt_dup_cursor(
90         struct xfs_btree_cur    *cur)
91 {
92         return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
93                         cur->bc_private.a.agbp, cur->bc_private.a.agno,
94                         cur->bc_btnum);
95 }
96
/*
 * Point the AGF at a new root block for this btree and adjust the
 * recorded tree height by @inc.  Updates both the on-disk AGF fields
 * and the in-core per-AG copy of the level, then logs the change.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,	/* short-form pointer to new root */
	int			inc)	/* level change: +1 or -1 */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;

	/* A zero short-form pointer would leave the root unset. */
	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	/* Keep the in-core per-AG level in sync with the on-disk AGF. */
	cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
116
/*
 * Allocate a new block for the btree from the AG freelist.
 *
 * @start and @length are part of the generic btree interface; they are
 * not consulted here since alloc btree blocks always come off the AGFL
 * one at a time.  On success *stat is 1 and new->s holds the AG block
 * number; an empty freelist sets *stat to 0 without an error.
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,	/* unused by this implementation */
	union xfs_btree_ptr	*new,	/* output: pointer to new block */
	int			length,	/* unused by this implementation */
	int			*stat)	/* output: success/failure */
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	/* NULLAGBLOCK means the freelist had nothing to give us. */
	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Account one more block in use by AG btrees in this transaction. */
	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
151
/*
 * Return a btree block to the AG freelist and mark it busy so it
 * cannot be handed out again before the freeing transaction is
 * committed to disk.
 */
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)	/* buffer holding the block to free */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	/* Convert the buffer's disk address back to an AG block number. */
	bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	/*
	 * Since blocks move to the free list without the coordination used in
	 * xfs_bmap_finish, we can't allow block to be available for
	 * reallocation and non-transaction writing (user data) until we know
	 * that the transaction that moved it to the free list is permanently
	 * on disk. We track the blocks by declaring these blocks as "busy";
	 * the busy list is maintained on a per-ag basis and each transaction
	 * records which entries should be removed when the iclog commits to
	 * disk. If a busy block is allocated, the iclog is pushed up to the
	 * LSN that freed the block.
	 */
	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}
182
/*
 * Update the longest extent in the AGF.
 *
 * Called by the generic btree code when the last record of the by-size
 * (cnt) btree changes, so that agf_longest (and the in-core per-AG
 * copy) tracks the largest free extent in the AG.  @reason says what
 * happened to the record at index @ptr.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,	/* last leaf block */
	union xfs_btree_rec	*rec,	/* record that changed */
	int			ptr,	/* index of the changed record */
	int			reason)	/* LASTREC_* code */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	__be32			len;	/* new longest length, big-endian */
	int			numrecs;

	/* Only the by-size tree maintains agf_longest. */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* Only an insert larger than the current longest counts. */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/*
		 * The deleted record was the old last record iff @ptr is
		 * now one past the remaining record count.
		 */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			/* The new last record supplies the new longest. */
			rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
			len = rrp->ar_blockcount;
		} else {
			/* Block is now empty: no free extent to report. */
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
242
243 STATIC int
244 xfs_allocbt_get_minrecs(
245         struct xfs_btree_cur    *cur,
246         int                     level)
247 {
248         return cur->bc_mp->m_alloc_mnr[level != 0];
249 }
250
251 STATIC int
252 xfs_allocbt_get_maxrecs(
253         struct xfs_btree_cur    *cur,
254         int                     level)
255 {
256         return cur->bc_mp->m_alloc_mxr[level != 0];
257 }
258
259 STATIC void
260 xfs_allocbt_init_key_from_rec(
261         union xfs_btree_key     *key,
262         union xfs_btree_rec     *rec)
263 {
264         ASSERT(rec->alloc.ar_startblock != 0);
265
266         key->alloc.ar_startblock = rec->alloc.ar_startblock;
267         key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
268 }
269
270 STATIC void
271 xfs_allocbt_init_rec_from_key(
272         union xfs_btree_key     *key,
273         union xfs_btree_rec     *rec)
274 {
275         ASSERT(key->alloc.ar_startblock != 0);
276
277         rec->alloc.ar_startblock = key->alloc.ar_startblock;
278         rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
279 }
280
281 STATIC void
282 xfs_allocbt_init_rec_from_cur(
283         struct xfs_btree_cur    *cur,
284         union xfs_btree_rec     *rec)
285 {
286         ASSERT(cur->bc_rec.a.ar_startblock != 0);
287
288         rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
289         rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
290 }
291
/*
 * Initialize @ptr with the root block of this btree, read from the
 * AGF of the cursor's allocation group.
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* The cursor must be over the AG this AGF describes. */
	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
304
305 STATIC __int64_t
306 xfs_allocbt_key_diff(
307         struct xfs_btree_cur    *cur,
308         union xfs_btree_key     *key)
309 {
310         xfs_alloc_rec_incore_t  *rec = &cur->bc_rec.a;
311         xfs_alloc_key_t         *kp = &key->alloc;
312         __int64_t               diff;
313
314         if (cur->bc_btnum == XFS_BTNUM_BNO) {
315                 return (__int64_t)be32_to_cpu(kp->ar_startblock) -
316                                 rec->ar_startblock;
317         }
318
319         diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
320         if (diff)
321                 return diff;
322
323         return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
324 }
325
/*
 * Replace the root block with @newroot, reducing the tree height by
 * one, and free the old root block held in @bp.
 */
STATIC int
xfs_allocbt_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,	/* buffer holding the old root */
	int			level,	/* level of the old root */
	union xfs_btree_ptr	*newroot)
{
	int			error;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	xfs_allocbt_set_root(cur, newroot, -1);
	error = xfs_allocbt_free_block(cur, bp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	XFS_BTREE_STATS_INC(cur, free);

	/* Drop the cursor's reference to the freed root and shrink it. */
	xfs_btree_setbuf(cur, level, NULL);
	cur->bc_nlevels--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
357
#ifdef XFS_BTREE_TRACE
/* Trace buffer shared by all alloc btree trace entries. */
ktrace_t	*xfs_allocbt_trace_buf;

/*
 * Record one generic btree trace event in the alloc btree ktrace
 * buffer.  The a0..a10 values are opaque words supplied by the
 * generic btree tracing macros.
 */
STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,	/* calling function name */
	char			*s,	/* event description string */
	int			type,	/* trace event type */
	int			line,	/* caller's line number (unused here) */
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		(void *)func, (void *)s, NULL, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}

/*
 * Extract the cursor state (AG number and in-core record) for tracing.
 */
STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,	/* output: AG number */
	__uint64_t		*l0,	/* output: record startblock */
	__uint64_t		*l1)	/* output: record blockcount */
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

/*
 * Extract the fields of an on-disk key, in CPU byte order, for tracing.
 */
STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,	/* output: key startblock */
	__uint64_t		*l1)	/* output: key blockcount */
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

/*
 * Extract the fields of an on-disk record, in CPU byte order, for
 * tracing.  Alloc records have only two fields, so l2 is always 0.
 */
STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,	/* output: record startblock */
	__uint64_t		*l1,	/* output: record blockcount */
	__uint64_t		*l2)	/* output: unused, set to 0 */
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif /* XFS_BTREE_TRACE */
423
/*
 * Operations vector wiring the generic btree code to the allocation
 * btree implementation above.
 */
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.kill_root		= xfs_allocbt_kill_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_allocbt_trace_enter,
	.trace_cursor		= xfs_allocbt_trace_cursor,
	.trace_key		= xfs_allocbt_trace_key,
	.trace_record		= xfs_allocbt_trace_record,
#endif
};
449
450 /*
451  * Allocate a new allocation btree cursor.
452  */
453 struct xfs_btree_cur *                  /* new alloc btree cursor */
454 xfs_allocbt_init_cursor(
455         struct xfs_mount        *mp,            /* file system mount point */
456         struct xfs_trans        *tp,            /* transaction pointer */
457         struct xfs_buf          *agbp,          /* buffer for agf structure */
458         xfs_agnumber_t          agno,           /* allocation group number */
459         xfs_btnum_t             btnum)          /* btree identifier */
460 {
461         struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
462         struct xfs_btree_cur    *cur;
463
464         ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
465
466         cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
467
468         cur->bc_tp = tp;
469         cur->bc_mp = mp;
470         cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
471         cur->bc_btnum = btnum;
472         cur->bc_blocklog = mp->m_sb.sb_blocklog;
473
474         cur->bc_ops = &xfs_allocbt_ops;
475         if (btnum == XFS_BTNUM_CNT)
476                 cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
477
478         cur->bc_private.a.agbp = agbp;
479         cur->bc_private.a.agno = agno;
480
481         return cur;
482 }