5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
/* Module identification strings for the UDF driver. */
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
/* Size of the laarr[] scratch arrays used when splitting/merging extents:
 * at most 5 extents are manipulated in one pass (see inode_getblk()). */
47 #define EXTENT_MERGE_SIZE 5
/* Forward declarations for the file-local helpers defined further below. */
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
54 static int8_t udf_insert_aext(struct inode *, struct extent_position,
55 	kernel_lb_addr, uint32_t);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 	struct extent_position *);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 * Clean-up before the specified inode is destroyed.
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
83 void udf_delete_inode(struct inode * inode)
/* Final iput() with i_nlink == 0: drop the page cache for this inode. */
85 truncate_inode_pages(&inode->i_data, 0);
87 if (is_bad_inode(inode))
/* Push the last on-disk state (synchronously if the inode is IS_SYNC),
 * then release the on-disk inode itself. */
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
/* Called when the in-core inode is torn down: give back any preallocated
 * blocks (only possible on a writable mount) and free the in-core copy of
 * the allocation-descriptor/EA area. */
103 void udf_clear_inode(struct inode *inode)
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
107 udf_discard_prealloc(inode);
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
/* Writepage hook: generic buffer-head path with udf_get_block() mapping. */
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
/* Readpage hook: generic buffer-head path with udf_get_block() mapping. */
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
/* prepare_write hook: map/allocate the buffers backing [from, to). */
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 return block_prepare_write(page, from, to, udf_get_block);
/* bmap hook for user-space FIBMAP: translate a file block to a device block. */
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 return generic_block_bmap(mapping,block,udf_get_block);
/* Address-space operations for extent-mapped UDF files; every path funnels
 * block mapping through udf_get_block(). */
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
/*
 * Convert a file whose data is stored inside the ICB (AD_IN_ICB) into a
 * normal extent-mapped file: the in-ICB data is copied into page 0 of the
 * page cache, the allocation type is switched to short/long ADs, and the
 * page is written back so the data lands in a real block.
 */
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
/* Empty file: nothing to copy, just flip the allocation type. */
156 if (!UDF_I_LENALLOC(inode))
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
/* Copy the in-ICB payload into page 0 and zero the tail of the page. */
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
169 if (!PageUptodate(page))
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
/* Clear the now-stale in-ICB copy and switch the allocation type. */
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Write the page out so the data is recorded in a real extent. */
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
191 mark_inode_dirty(inode);
/*
 * Convert a directory stored inside the ICB into a one-block directory:
 * allocate a fresh block, copy each fileIdentDesc from the in-ICB area into
 * it, and record that block as the directory's first (and only) extent.
 * Returns the buffer_head of the new directory block; *block receives its
 * logical block number, *err an error code on failure.
 */
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
197 struct buffer_head *dbh = NULL;
201 struct extent_position epos;
203 struct udf_fileident_bh sfibh, dfibh;
204 loff_t f_pos = udf_ext0_offset(inode) >> 2;
205 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
206 struct fileIdentDesc cfi, *sfi, *dfi;
208 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
209 alloctype = ICBTAG_FLAG_AD_SHORT;
211 alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: no entries to move, only the alloc type changes. */
215 UDF_I_ALLOCTYPE(inode) = alloctype;
216 mark_inode_dirty(inode);
220 /* alloc block, and copy data to it */
221 *block = udf_new_block(inode->i_sb, inode,
222 UDF_I_LOCATION(inode).partitionReferenceNum,
223 UDF_I_LOCATION(inode).logicalBlockNum, err);
227 newblock = udf_get_pblock(inode->i_sb, *block,
228 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
231 dbh = udf_tgetblk(inode->i_sb, newblock);
235 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
236 set_buffer_uptodate(dbh);
238 mark_buffer_dirty_inode(dbh, inode);
/* Walk the in-ICB entries (source) and re-emit them into the new block
 * (destination), rewriting each descriptor's tag location. */
240 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
241 sfibh.sbh = sfibh.ebh = NULL;
242 dfibh.soffset = dfibh.eoffset = 0;
243 dfibh.sbh = dfibh.ebh = dbh;
244 while ( (f_pos < size) )
/* Read with AD_IN_ICB set so udf_fileident_read() parses the ICB data. */
246 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
247 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
253 UDF_I_ALLOCTYPE(inode) = alloctype;
254 sfi->descTag.tagLocation = cpu_to_le32(*block);
255 dfibh.soffset = dfibh.eoffset;
256 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
257 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
258 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
259 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB state before bailing out. */
261 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
266 mark_buffer_dirty_inode(dbh, inode);
/* Wipe the stale in-ICB copy and record the new block as extent 0. */
268 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
269 UDF_I_LENALLOC(inode) = 0;
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
275 epos.block = UDF_I_LOCATION(inode);
276 epos.offset = udf_file_entry_alloc_offset(inode);
277 udf_add_aext(inode, &epos, eloc, elen, 0);
281 mark_inode_dirty(inode);
/*
 * get_block callback for the generic buffer layer: map file @block to a
 * physical block in @bh_result. Without @create the lookup goes through
 * udf_block_map(); with @create, inode_getblk() may allocate a new block
 * and the sequential-allocation hint (NEXT_ALLOC_BLOCK/GOAL) is advanced.
 */
285 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
288 struct buffer_head *bh;
293 phys = udf_block_map(inode, block);
295 map_bh(bh_result, inode->i_sb, phys);
/* Sequential write pattern: bump the allocation goal alongside. */
308 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
310 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
311 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
316 bh = inode_getblk(inode, block, &err, &phys, &new);
323 set_buffer_new(bh_result);
324 map_bh(bh_result, inode->i_sb, phys);
330 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * Get (and optionally allocate) the buffer_head for file block @block.
 * A dummy buffer_head is passed to udf_get_block() purely to receive the
 * mapping; a freshly allocated block is zero-filled and marked dirty.
 */
334 static struct buffer_head *
335 udf_getblk(struct inode *inode, long block, int create, int *err)
337 struct buffer_head dummy;
/* Sentinel so an unmapped result is detectable via buffer_mapped(). */
340 dummy.b_blocknr = -1000;
341 *err = udf_get_block(inode, block, &dummy, create);
342 if (!*err && buffer_mapped(&dummy))
344 struct buffer_head *bh;
345 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 if (buffer_new(&dummy))
349 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
350 set_buffer_uptodate(bh);
352 mark_buffer_dirty_inode(bh, inode);
359 /* Extend the file by 'blocks' blocks, return the number of extents added */
/* @last_pos/@last_ext describe the file's current last extent; new extents
 * are NOT_RECORDED_NOT_ALLOCATED (holes). Saved preallocation, if any, is
 * re-appended at the end. Returns -1 on failure to add an extent. */
360 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
361 kernel_long_ad *last_ext, sector_t blocks)
364 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
365 struct super_block *sb = inode->i_sb;
366 kernel_lb_addr prealloc_loc = {0, 0};
367 int prealloc_len = 0;
369 /* The previous extent is fake and we should not extend by anything
370 * - there's nothing to do... */
373 /* Round the last extent up to a multiple of block size */
374 if (last_ext->extLength & (sb->s_blocksize - 1)) {
375 last_ext->extLength =
376 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
377 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
378 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
379 UDF_I_LENEXTENTS(inode) =
380 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
381 ~(sb->s_blocksize - 1);
383 /* Last extent are just preallocated blocks? */
384 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
385 /* Save the extent so that we can reattach it to the end */
386 prealloc_loc = last_ext->extLocation;
387 prealloc_len = last_ext->extLength;
388 /* Mark the extent as a hole */
389 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
390 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
391 last_ext->extLocation.logicalBlockNum = 0;
392 last_ext->extLocation.partitionReferenceNum = 0;
394 /* Can we merge with the previous extent? */
395 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
/* An extent length field holds at most (1<<30)-blocksize bytes; 'add'
 * is how many more blocks still fit into the previous hole extent. */
396 add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
397 UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
401 last_ext->extLength += add << sb->s_blocksize_bits;
405 udf_add_aext(inode, last_pos, last_ext->extLocation,
406 last_ext->extLength, 1);
410 udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
411 /* Managed to do everything necessary? */
415 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
416 last_ext->extLocation.logicalBlockNum = 0;
417 last_ext->extLocation.partitionReferenceNum = 0;
418 add = (1 << (30-sb->s_blocksize_bits)) - 1;
419 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
420 /* Create enough extents to cover the whole hole */
421 while (blocks > add) {
423 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
424 last_ext->extLength, 1) == -1)
/* Final, possibly shorter, hole extent covering the remainder. */
429 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
430 (blocks << sb->s_blocksize_bits);
431 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
432 last_ext->extLength, 1) == -1)
437 /* Do we have some preallocated blocks saved? */
439 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
441 last_ext->extLocation = prealloc_loc;
442 last_ext->extLength = prealloc_len;
445 /* last_pos should point to the last written extent... */
446 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
447 last_pos->offset -= sizeof(short_ad);
448 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
449 last_pos->offset -= sizeof(long_ad);
/*
 * Core block-allocation routine: find or create the physical block backing
 * file block @block. Walks the extent list (three rolling extent positions:
 * prev/cur/next), extends the file past EOF if needed, splits the extent
 * containing the target block, preallocates, merges, and writes the updated
 * extents back. *phys receives the physical block, *new is set for a fresh
 * allocation, *err an error code.
 */
455 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
456 int *err, long *phys, int *new)
/* NOTE(review): function-local static — shared across all inodes/CPUs;
 * confirm the callers' locking makes this safe. */
458 static sector_t last_block;
459 struct buffer_head *result = NULL;
460 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
461 struct extent_position prev_epos, cur_epos, next_epos;
462 int count = 0, startnum = 0, endnum = 0;
463 uint32_t elen = 0, tmpelen;
464 kernel_lb_addr eloc, tmpeloc;
466 loff_t lbcount = 0, b_off = 0;
467 uint32_t newblocknum, newblock;
470 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
473 prev_epos.offset = udf_file_entry_alloc_offset(inode);
474 prev_epos.block = UDF_I_LOCATION(inode);
476 cur_epos = next_epos = prev_epos;
477 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
479 /* find the extent which contains the block we are looking for.
480 alternate between laarr[0] and laarr[1] for locations of the
481 current extent, and the previous extent */
484 if (prev_epos.bh != cur_epos.bh)
486 brelse(prev_epos.bh);
488 prev_epos.bh = cur_epos.bh;
490 if (cur_epos.bh != next_epos.bh)
493 get_bh(next_epos.bh);
494 cur_epos.bh = next_epos.bh;
499 prev_epos.block = cur_epos.block;
500 cur_epos.block = next_epos.block;
502 prev_epos.offset = cur_epos.offset;
503 cur_epos.offset = next_epos.offset;
505 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
510 laarr[c].extLength = (etype << 30) | elen;
511 laarr[c].extLocation = eloc;
/* Track the block after the last recorded extent as allocation goal. */
513 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
514 pgoal = eloc.logicalBlockNum +
515 ((elen + inode->i_sb->s_blocksize - 1) >>
516 inode->i_sb->s_blocksize_bits);
519 } while (lbcount + elen <= b_off);
522 offset = b_off >> inode->i_sb->s_blocksize_bits;
524 * Move prev_epos and cur_epos into indirect extent if we are at
527 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
528 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
530 /* if the extent is allocated and recorded, return the block
531 if the extent is not a multiple of the blocksize, round up */
533 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
535 if (elen & (inode->i_sb->s_blocksize - 1))
537 elen = EXT_RECORDED_ALLOCATED |
538 ((elen + inode->i_sb->s_blocksize - 1) &
539 ~(inode->i_sb->s_blocksize - 1));
540 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
542 brelse(prev_epos.bh);
544 brelse(next_epos.bh);
545 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
551 /* Are we beyond EOF? */
562 /* Create a fake extent when there's not one */
563 memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
564 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
565 /* Will udf_extend_file() create real extent from a fake one? */
566 startnum = (offset > 0);
568 /* Create extents for the hole between EOF and offset */
569 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
571 brelse(prev_epos.bh);
573 brelse(next_epos.bh);
574 /* We don't really know the error here so we just make
582 /* We are not covered by a preallocated extent? */
583 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
584 /* Is there any real extent? - otherwise we overwrite
588 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
589 inode->i_sb->s_blocksize;
590 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
598 endnum = startnum = ((count > 2) ? 2 : count);
600 /* if the current extent is in position 0, swap it with the previous */
601 if (!c && count != 1)
609 /* if the current block is located in an extent, read the next extent */
610 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
612 laarr[c+1].extLength = (etype << 30) | elen;
613 laarr[c+1].extLocation = eloc;
623 /* if the current extent is not recorded but allocated, get the
624 block in the extent corresponding to the requested block */
625 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
626 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
627 else /* otherwise, allocate a new block */
/* Prefer the sequential-allocation goal if this is the expected block. */
629 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
630 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
635 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
638 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
639 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
641 brelse(prev_epos.bh);
645 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
648 /* if the extent the requsted block is located in contains multiple blocks,
649 split the extent into at most three extents. blocks prior to requested
650 block, requested block, and blocks after requested block */
651 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
653 #ifdef UDF_PREALLOCATE
654 /* preallocate blocks */
655 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
658 /* merge any continuous blocks in laarr */
659 udf_merge_extents(inode, laarr, &endnum);
661 /* write back the new extents, inserting new extents if the new number
662 of extents is greater than the old number, and deleting extents if
663 the new number of extents is less than the old number */
664 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
666 brelse(prev_epos.bh);
668 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
669 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember where we are for the next sequential allocation. */
676 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
677 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
678 inode->i_ctime = current_fs_time(inode->i_sb);
681 udf_sync_inode(inode);
683 mark_inode_dirty(inode);
/*
 * Split laarr[*c] (the extent containing the requested block at @offset)
 * into up to three extents: blocks before the target, the target block
 * itself (recorded+allocated at @newblocknum), and blocks after. *c and
 * *endnum are updated to reflect the new layout.
 */
687 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
688 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
690 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
691 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
694 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
695 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
696 int8_t etype = (laarr[curr].extLength >> 30);
/* Target sits at the start or end of the extent: two-way split. */
700 else if (!offset || blen == offset + 1)
702 laarr[curr+2] = laarr[curr+1];
703 laarr[curr+1] = laarr[curr];
/* Target is in the middle: three-way split. */
707 laarr[curr+3] = laarr[curr+1];
708 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading part: free no-longer-needed preallocated blocks and turn the
 * head into a hole, or just shorten a hole extent. */
713 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
715 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
716 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
717 (offset << inode->i_sb->s_blocksize_bits);
718 laarr[curr].extLocation.logicalBlockNum = 0;
719 laarr[curr].extLocation.partitionReferenceNum = 0;
722 laarr[curr].extLength = (etype << 30) |
723 (offset << inode->i_sb->s_blocksize_bits);
/* The target block itself becomes a single recorded, allocated extent. */
729 laarr[curr].extLocation.logicalBlockNum = newblocknum;
730 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
731 laarr[curr].extLocation.partitionReferenceNum =
732 UDF_I_LOCATION(inode).partitionReferenceNum;
733 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
734 inode->i_sb->s_blocksize;
/* Trailing part, if any, keeps the original extent type. */
737 if (blen != offset + 1)
739 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
740 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
741 laarr[curr].extLength = (etype << 30) |
742 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks contiguously after
 * the just-allocated extent at laarr[c], converting following hole extents
 * into a NOT_RECORDED_ALLOCATED (preallocated) extent. *endnum is adjusted
 * when extents are added or consumed.
 */
749 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
750 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
752 int start, length = 0, currlength = 0, i;
754 if (*endnum >= (c+1))
/* An existing preallocated extent directly follows: extend it. */
763 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
766 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
767 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many hole blocks after 'start' we could cover. */
773 for (i=start+1; i<=*endnum; i++)
778 length += UDF_DEFAULT_PREALLOC_BLOCKS;
780 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
781 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
782 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Try to grab contiguous blocks right after the 'start' extent. */
789 int next = laarr[start].extLocation.logicalBlockNum +
790 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
791 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
792 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
793 laarr[start].extLocation.partitionReferenceNum,
794 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
795 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing preallocated extent... */
800 laarr[start].extLength +=
801 (numalloc << inode->i_sb->s_blocksize_bits);
/* ...or insert a new preallocated extent after laarr[c]. */
804 memmove(&laarr[c+2], &laarr[c+1],
805 sizeof(long_ad) * (*endnum - (c+1)));
807 laarr[c+1].extLocation.logicalBlockNum = next;
808 laarr[c+1].extLocation.partitionReferenceNum =
809 laarr[c].extLocation.partitionReferenceNum;
810 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
811 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink/remove the following hole extents now covered by prealloc. */
815 for (i=start+1; numalloc && i<*endnum; i++)
817 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
818 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
822 laarr[i].extLength -=
823 (numalloc << inode->i_sb->s_blocksize_bits);
830 memmove(&laarr[i], &laarr[i+1],
831 sizeof(long_ad) * (*endnum - (i+1)));
836 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * Coalesce adjacent extents in laarr[] that are physically contiguous and
 * of the same type; merged lengths are capped at the maximum an extent
 * length field can hold (UDF_EXTENT_LENGTH_MASK, with any overflow left in
 * the following extent). A preallocated extent followed by a hole is freed
 * back and converted into a hole. *endnum shrinks as extents merge.
 */
841 static void udf_merge_extents(struct inode *inode,
842 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
846 for (i=0; i<(*endnum-1); i++)
848 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Same type and either both holes or physically contiguous. */
850 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
851 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
852 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
853 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length overflows one extent: fill extent i to the max and
 * leave the remainder in extent i+1 (location advanced accordingly). */
855 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
856 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
857 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
859 laarr[i+1].extLength = (laarr[i+1].extLength -
860 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
861 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
862 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
863 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
864 laarr[i+1].extLocation.logicalBlockNum =
865 laarr[i].extLocation.logicalBlockNum +
866 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
867 inode->i_sb->s_blocksize_bits);
/* Fits in one extent: absorb i+1 into i and close the gap. */
871 laarr[i].extLength = laarr[i+1].extLength +
872 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
873 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
875 memmove(&laarr[i+1], &laarr[i+2],
876 sizeof(long_ad) * (*endnum - (i+2)));
/* Preallocated extent followed by a hole: free the preallocated blocks
 * and merge both into a single hole extent. */
882 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
883 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
885 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
886 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
887 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
888 laarr[i].extLocation.logicalBlockNum = 0;
889 laarr[i].extLocation.partitionReferenceNum = 0;
891 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
892 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
893 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
895 laarr[i+1].extLength = (laarr[i+1].extLength -
896 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
897 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
898 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
899 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
903 laarr[i].extLength = laarr[i+1].extLength +
904 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
905 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
907 memmove(&laarr[i+1], &laarr[i+2],
908 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone preallocated extent: give the blocks back and mark it a hole. */
913 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
915 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
916 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
917 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
918 laarr[i].extLocation.logicalBlockNum = 0;
919 laarr[i].extLocation.partitionReferenceNum = 0;
920 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
921 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * Write the laarr[] extents back to the on-disk allocation descriptors at
 * @epos: delete surplus descriptors when the count shrank, insert new ones
 * when it grew, then overwrite the remainder in place.
 */
926 static void udf_update_extents(struct inode *inode,
927 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
928 struct extent_position *epos)
931 kernel_lb_addr tmploc;
934 if (startnum > endnum)
936 for (i=0; i<(startnum-endnum); i++)
937 udf_delete_aext(inode, *epos, laarr[i].extLocation,
940 else if (startnum < endnum)
942 for (i=0; i<(endnum-startnum); i++)
944 udf_insert_aext(inode, *epos, laarr[i].extLocation,
946 udf_next_aext(inode, epos, &laarr[i].extLocation,
947 &laarr[i].extLength, 1);
/* Rewrite the remaining descriptors in place. */
952 for (i=start; i<endnum; i++)
954 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
955 udf_write_aext(inode, epos, laarr[i].extLocation,
956 laarr[i].extLength, 1);
/*
 * Like udf_getblk() but also reads the block from disk if it is not yet
 * up to date. Returns the buffer_head, or NULL with *err set on failure.
 */
960 struct buffer_head * udf_bread(struct inode * inode, int block,
961 int create, int * err)
963 struct buffer_head * bh = NULL;
965 bh = udf_getblk(inode, block, create, err);
969 if (buffer_uptodate(bh))
971 ll_rw_block(READ, 1, &bh);
973 if (buffer_uptodate(bh))
/*
 * Truncate the inode to i_size. In-ICB files either stay in the ICB (tail
 * is zeroed) or are first expanded to extents if the new size no longer
 * fits; extent-mapped files go through block_truncate_page() and
 * udf_truncate_extents().
 */
980 void udf_truncate(struct inode * inode)
/* Only regular files, directories and symlinks can be truncated. */
985 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
986 S_ISLNK(inode->i_mode)))
988 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
992 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size does not fit in the ICB: convert to extents first. */
994 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
997 udf_expand_file_adinicb(inode, inode->i_size, &err);
998 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* Expansion failed; keep the size the ICB actually holds. */
1000 inode->i_size = UDF_I_LENALLOC(inode);
1005 udf_truncate_extents(inode);
/* Still in-ICB: zero everything beyond the new size. */
1009 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1010 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1011 UDF_I_LENALLOC(inode) = inode->i_size;
1016 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1017 udf_truncate_extents(inode);
1020 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1022 udf_sync_inode (inode);
1024 mark_inode_dirty(inode);
/*
 * Read the on-disk (Extended) File Entry for the inode's ICB location,
 * following strategy-4096 indirect entries (by updating UDF_I_LOCATION and
 * recursing), then hand off to udf_fill_inode(). Marks the inode bad on
 * read or tag-identification failure.
 */
1029 __udf_read_inode(struct inode *inode)
1031 struct buffer_head *bh = NULL;
1032 struct fileEntry *fe;
1036 * Set defaults, but the inode is still incomplete!
1037 * Note: get_new_inode() sets the following on a new inode:
1040 * i_flags = sb->s_flags
1042 * clean_inode(): zero fills and sets
1047 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1051 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1053 make_bad_inode(inode);
/* Only FE, EFE or Unallocated Space Entry tags are acceptable here. */
1057 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1058 ident != TAG_IDENT_USE)
1060 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1061 inode->i_ino, ident);
1063 make_bad_inode(inode);
1067 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the ICB may be an indirect entry pointing at the real
 * file entry; chase the indirection and re-read. */
1069 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
1071 struct buffer_head *ibh = NULL, *nbh = NULL;
1072 struct indirectEntry *ie;
1074 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1075 if (ident == TAG_IDENT_IE)
1080 ie = (struct indirectEntry *)ibh->b_data;
1082 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1084 if (ie->indirectICB.extLength &&
1085 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1087 if (ident == TAG_IDENT_FE ||
1088 ident == TAG_IDENT_EFE)
1090 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1094 __udf_read_inode(inode);
/* Only strategy 4 and 4096 are supported. */
1110 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1112 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1113 le16_to_cpu(fe->icbTag.strategyType));
1115 make_bad_inode(inode);
1118 udf_fill_inode(inode, bh);
/*
 * Populate the in-core inode from the raw (Extended) File Entry in @bh:
 * copies the allocation-descriptor/EA area into UDF_I_DATA, converts uid/
 * gid/permissions/link count/size/timestamps from on-disk form, and wires
 * up i_op/i_fop/a_ops according to the ICB file type.
 */
1123 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1125 struct fileEntry *fe;
1126 struct extendedFileEntry *efe;
1131 fe = (struct fileEntry *)bh->b_data;
1132 efe = (struct extendedFileEntry *)bh->b_data;
1134 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1135 UDF_I_STRAT4096(inode) = 0;
1136 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1137 UDF_I_STRAT4096(inode) = 1;
1139 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1140 UDF_I_UNIQUE(inode) = 0;
1141 UDF_I_LENEATTR(inode) = 0;
1142 UDF_I_LENEXTENTS(inode) = 0;
1143 UDF_I_LENALLOC(inode) = 0;
1144 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1145 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy the descriptor tail (EAs + allocation descriptors) in-core; the
 * offset depends on which descriptor type (EFE/FE/USE) we were given.
 * NOTE(review): kmalloc() results are used unchecked here — confirm
 * OOM handling at the call sites. */
1146 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1148 UDF_I_EFE(inode) = 1;
1149 UDF_I_USE(inode) = 0;
1150 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1151 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1153 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1155 UDF_I_EFE(inode) = 0;
1156 UDF_I_USE(inode) = 0;
1157 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1158 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1160 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1162 UDF_I_EFE(inode) = 0;
1163 UDF_I_USE(inode) = 1;
1164 UDF_I_LENALLOC(inode) =
1166 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1167 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1168 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* Ownership: -1 on disk (or the ignore mount flags) falls back to the
 * uid/gid given at mount time. */
1172 inode->i_uid = le32_to_cpu(fe->uid);
1173 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1174 UDF_FLAG_UID_IGNORE))
1175 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1177 inode->i_gid = le32_to_cpu(fe->gid);
1178 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1179 UDF_FLAG_GID_IGNORE))
1180 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1182 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1183 if (!inode->i_nlink)
1186 inode->i_size = le64_to_cpu(fe->informationLength);
1187 UDF_I_LENEXTENTS(inode) = inode->i_size;
1189 inode->i_mode = udf_convert_permissions(fe);
1190 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* FE and EFE store timestamps/counters at different offsets, so the two
 * layouts are decoded in separate branches below. */
1192 if (UDF_I_EFE(inode) == 0)
1194 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1195 (inode->i_sb->s_blocksize_bits - 9);
1197 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1198 lets_to_cpu(fe->accessTime)) )
1200 inode->i_atime.tv_sec = convtime;
1201 inode->i_atime.tv_nsec = convtime_usec * 1000;
/* Unconvertible stamp: fall back to the volume recording time. */
1205 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1208 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1209 lets_to_cpu(fe->modificationTime)) )
1211 inode->i_mtime.tv_sec = convtime;
1212 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1216 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1219 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1220 lets_to_cpu(fe->attrTime)) )
1222 inode->i_ctime.tv_sec = convtime;
1223 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1227 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1230 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1231 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1232 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1233 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1237 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1238 (inode->i_sb->s_blocksize_bits - 9);
1240 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1241 lets_to_cpu(efe->accessTime)) )
1243 inode->i_atime.tv_sec = convtime;
1244 inode->i_atime.tv_nsec = convtime_usec * 1000;
1248 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1251 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1252 lets_to_cpu(efe->modificationTime)) )
1254 inode->i_mtime.tv_sec = convtime;
1255 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1259 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1262 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1263 lets_to_cpu(efe->createTime)) )
1265 UDF_I_CRTIME(inode).tv_sec = convtime;
1266 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1270 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1273 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1274 lets_to_cpu(efe->attrTime)) )
1276 inode->i_ctime.tv_sec = convtime;
1277 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1281 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1284 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1285 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1286 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1287 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Wire up operations tables and mode bits from the ICB file type. */
1290 switch (fe->icbTag.fileType)
1292 case ICBTAG_FILE_TYPE_DIRECTORY:
1294 inode->i_op = &udf_dir_inode_operations;
1295 inode->i_fop = &udf_dir_operations;
1296 inode->i_mode |= S_IFDIR;
1300 case ICBTAG_FILE_TYPE_REALTIME:
1301 case ICBTAG_FILE_TYPE_REGULAR:
1302 case ICBTAG_FILE_TYPE_UNDEF:
1304 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1305 inode->i_data.a_ops = &udf_adinicb_aops;
1307 inode->i_data.a_ops = &udf_aops;
1308 inode->i_op = &udf_file_inode_operations;
1309 inode->i_fop = &udf_file_operations;
1310 inode->i_mode |= S_IFREG;
1313 case ICBTAG_FILE_TYPE_BLOCK:
1315 inode->i_mode |= S_IFBLK;
1318 case ICBTAG_FILE_TYPE_CHAR:
1320 inode->i_mode |= S_IFCHR;
1323 case ICBTAG_FILE_TYPE_FIFO:
1325 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1328 case ICBTAG_FILE_TYPE_SOCKET:
1330 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1333 case ICBTAG_FILE_TYPE_SYMLINK:
1335 inode->i_data.a_ops = &udf_symlink_aops;
1336 inode->i_op = &page_symlink_inode_operations;
1337 inode->i_mode = S_IFLNK|S_IRWXUGO;
1342 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1343 inode->i_ino, fe->icbTag.fileType);
1344 make_bad_inode(inode);
/* Device nodes: major/minor live in a Device Specification EA (type 12). */
1348 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1350 struct deviceSpec *dsea =
1351 (struct deviceSpec *)
1352 udf_get_extendedattr(inode, 12, 1);
1356 init_special_inode(inode, inode->i_mode, MKDEV(
1357 le32_to_cpu(dsea->majorDeviceIdent),
1358 le32_to_cpu(dsea->minorDeviceIdent)));
1359 /* Developer ID ??? */
1363 make_bad_inode(inode);
1369 udf_convert_permissions(struct fileEntry *fe)
1372 uint32_t permissions;
1375 permissions = le32_to_cpu(fe->permissions);
1376 flags = le16_to_cpu(fe->icbTag.flags);
1378 mode = (( permissions ) & S_IRWXO) |
1379 (( permissions >> 2 ) & S_IRWXG) |
1380 (( permissions >> 4 ) & S_IRWXU) |
1381 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1382 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1383 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	This routine is called whenever an inode is synced.
 *	Currently this routine is just a placeholder.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */
int udf_write_inode(struct inode * inode, int sync)
{
	int ret;

	/* udf_update_inode() touches shared UDF metadata; serialise under
	 * the BKL as the rest of this filesystem does. */
	lock_kernel();
	ret = udf_update_inode(inode, sync);
	unlock_kernel();

	return ret;
}
/* Synchronously write the inode's on-disk (extended) file entry. */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
1418 udf_update_inode(struct inode *inode, int do_sync)
1420 struct buffer_head *bh = NULL;
1421 struct fileEntry *fe;
1422 struct extendedFileEntry *efe;
1427 kernel_timestamp cpu_time;
1430 bh = udf_tread(inode->i_sb,
1431 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1435 udf_debug("bread failure\n");
1439 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1441 fe = (struct fileEntry *)bh->b_data;
1442 efe = (struct extendedFileEntry *)bh->b_data;
1444 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1446 struct unallocSpaceEntry *use =
1447 (struct unallocSpaceEntry *)bh->b_data;
1449 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1450 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1451 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1453 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1454 use->descTag.descCRCLength = cpu_to_le16(crclen);
1455 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1457 use->descTag.tagChecksum = 0;
1458 for (i=0; i<16; i++)
1460 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1462 mark_buffer_dirty(bh);
1467 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1468 fe->uid = cpu_to_le32(-1);
1469 else fe->uid = cpu_to_le32(inode->i_uid);
1471 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1472 fe->gid = cpu_to_le32(-1);
1473 else fe->gid = cpu_to_le32(inode->i_gid);
1475 udfperms = ((inode->i_mode & S_IRWXO) ) |
1476 ((inode->i_mode & S_IRWXG) << 2) |
1477 ((inode->i_mode & S_IRWXU) << 4);
1479 udfperms |= (le32_to_cpu(fe->permissions) &
1480 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1481 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1482 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1483 fe->permissions = cpu_to_le32(udfperms);
1485 if (S_ISDIR(inode->i_mode))
1486 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1488 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1490 fe->informationLength = cpu_to_le64(inode->i_size);
1492 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1495 struct deviceSpec *dsea =
1496 (struct deviceSpec *)
1497 udf_get_extendedattr(inode, 12, 1);
1501 dsea = (struct deviceSpec *)
1502 udf_add_extendedattr(inode,
1503 sizeof(struct deviceSpec) +
1504 sizeof(regid), 12, 0x3);
1505 dsea->attrType = cpu_to_le32(12);
1506 dsea->attrSubtype = 1;
1507 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1509 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1511 eid = (regid *)dsea->impUse;
1512 memset(eid, 0, sizeof(regid));
1513 strcpy(eid->ident, UDF_ID_DEVELOPER);
1514 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1515 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1516 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1517 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1520 if (UDF_I_EFE(inode) == 0)
1522 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1523 fe->logicalBlocksRecorded = cpu_to_le64(
1524 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1525 (inode->i_sb->s_blocksize_bits - 9));
1527 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1528 fe->accessTime = cpu_to_lets(cpu_time);
1529 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1530 fe->modificationTime = cpu_to_lets(cpu_time);
1531 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1532 fe->attrTime = cpu_to_lets(cpu_time);
1533 memset(&(fe->impIdent), 0, sizeof(regid));
1534 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1535 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1536 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1537 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1538 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1539 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1540 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1541 crclen = sizeof(struct fileEntry);
1545 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1546 efe->objectSize = cpu_to_le64(inode->i_size);
1547 efe->logicalBlocksRecorded = cpu_to_le64(
1548 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1549 (inode->i_sb->s_blocksize_bits - 9));
1551 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1552 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1553 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1555 UDF_I_CRTIME(inode) = inode->i_atime;
1557 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1558 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1559 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1561 UDF_I_CRTIME(inode) = inode->i_mtime;
1563 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1564 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1565 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1567 UDF_I_CRTIME(inode) = inode->i_ctime;
1570 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1571 efe->accessTime = cpu_to_lets(cpu_time);
1572 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1573 efe->modificationTime = cpu_to_lets(cpu_time);
1574 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1575 efe->createTime = cpu_to_lets(cpu_time);
1576 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1577 efe->attrTime = cpu_to_lets(cpu_time);
1579 memset(&(efe->impIdent), 0, sizeof(regid));
1580 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1581 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1582 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1583 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1584 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1585 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1586 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1587 crclen = sizeof(struct extendedFileEntry);
1589 if (UDF_I_STRAT4096(inode))
1591 fe->icbTag.strategyType = cpu_to_le16(4096);
1592 fe->icbTag.strategyParameter = cpu_to_le16(1);
1593 fe->icbTag.numEntries = cpu_to_le16(2);
1597 fe->icbTag.strategyType = cpu_to_le16(4);
1598 fe->icbTag.numEntries = cpu_to_le16(1);
1601 if (S_ISDIR(inode->i_mode))
1602 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1603 else if (S_ISREG(inode->i_mode))
1604 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1605 else if (S_ISLNK(inode->i_mode))
1606 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1607 else if (S_ISBLK(inode->i_mode))
1608 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1609 else if (S_ISCHR(inode->i_mode))
1610 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1611 else if (S_ISFIFO(inode->i_mode))
1612 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1613 else if (S_ISSOCK(inode->i_mode))
1614 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1616 icbflags = UDF_I_ALLOCTYPE(inode) |
1617 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1618 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1619 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1620 (le16_to_cpu(fe->icbTag.flags) &
1621 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1622 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1624 fe->icbTag.flags = cpu_to_le16(icbflags);
1625 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1626 fe->descTag.descVersion = cpu_to_le16(3);
1628 fe->descTag.descVersion = cpu_to_le16(2);
1629 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1630 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1631 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1632 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1633 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1635 fe->descTag.tagChecksum = 0;
1636 for (i=0; i<16; i++)
1638 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1640 /* write the data blocks */
1641 mark_buffer_dirty(bh);
1644 sync_dirty_buffer(bh);
1645 if (buffer_req(bh) && !buffer_uptodate(bh))
1647 printk("IO error syncing udf inode [%s:%08lx]\n",
1648 inode->i_sb->s_id, inode->i_ino);
1657 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1659 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1660 struct inode *inode = iget_locked(sb, block);
1665 if (inode->i_state & I_NEW) {
1666 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1667 __udf_read_inode(inode);
1668 unlock_new_inode(inode);
1671 if (is_bad_inode(inode))
1674 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1675 udf_debug("block=%d, partition=%d out of range\n",
1676 ino.logicalBlockNum, ino.partitionReferenceNum);
1677 make_bad_inode(inode);
1688 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1689 kernel_lb_addr eloc, uint32_t elen, int inc)
1692 short_ad *sad = NULL;
1693 long_ad *lad = NULL;
1694 struct allocExtDesc *aed;
1699 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1701 ptr = epos->bh->b_data + epos->offset;
1703 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1704 adsize = sizeof(short_ad);
1705 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1706 adsize = sizeof(long_ad);
1710 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1713 struct buffer_head *nbh;
1715 kernel_lb_addr obloc = epos->block;
1717 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1718 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1722 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1728 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1729 set_buffer_uptodate(nbh);
1731 mark_buffer_dirty_inode(nbh, inode);
1733 aed = (struct allocExtDesc *)(nbh->b_data);
1734 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1735 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1736 if (epos->offset + adsize > inode->i_sb->s_blocksize)
1738 loffset = epos->offset;
1739 aed->lengthAllocDescs = cpu_to_le32(adsize);
1740 sptr = ptr - adsize;
1741 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1742 memcpy(dptr, sptr, adsize);
1743 epos->offset = sizeof(struct allocExtDesc) + adsize;
1747 loffset = epos->offset + adsize;
1748 aed->lengthAllocDescs = cpu_to_le32(0);
1750 epos->offset = sizeof(struct allocExtDesc);
1754 aed = (struct allocExtDesc *)epos->bh->b_data;
1755 aed->lengthAllocDescs =
1756 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1760 UDF_I_LENALLOC(inode) += adsize;
1761 mark_inode_dirty(inode);
1764 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1765 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1766 epos->block.logicalBlockNum, sizeof(tag));
1768 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1769 epos->block.logicalBlockNum, sizeof(tag));
1770 switch (UDF_I_ALLOCTYPE(inode))
1772 case ICBTAG_FLAG_AD_SHORT:
1774 sad = (short_ad *)sptr;
1775 sad->extLength = cpu_to_le32(
1776 EXT_NEXT_EXTENT_ALLOCDECS |
1777 inode->i_sb->s_blocksize);
1778 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1781 case ICBTAG_FLAG_AD_LONG:
1783 lad = (long_ad *)sptr;
1784 lad->extLength = cpu_to_le32(
1785 EXT_NEXT_EXTENT_ALLOCDECS |
1786 inode->i_sb->s_blocksize);
1787 lad->extLocation = cpu_to_lelb(epos->block);
1788 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1794 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1795 udf_update_tag(epos->bh->b_data, loffset);
1797 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1798 mark_buffer_dirty_inode(epos->bh, inode);
1802 mark_inode_dirty(inode);
1806 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1810 UDF_I_LENALLOC(inode) += adsize;
1811 mark_inode_dirty(inode);
1815 aed = (struct allocExtDesc *)epos->bh->b_data;
1816 aed->lengthAllocDescs =
1817 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1818 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1819 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1821 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1822 mark_buffer_dirty_inode(epos->bh, inode);
1828 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1829 kernel_lb_addr eloc, uint32_t elen, int inc)
1835 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1837 ptr = epos->bh->b_data + epos->offset;
1839 switch (UDF_I_ALLOCTYPE(inode))
1841 case ICBTAG_FLAG_AD_SHORT:
1843 short_ad *sad = (short_ad *)ptr;
1844 sad->extLength = cpu_to_le32(elen);
1845 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1846 adsize = sizeof(short_ad);
1849 case ICBTAG_FLAG_AD_LONG:
1851 long_ad *lad = (long_ad *)ptr;
1852 lad->extLength = cpu_to_le32(elen);
1853 lad->extLocation = cpu_to_lelb(eloc);
1854 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1855 adsize = sizeof(long_ad);
1864 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1866 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1867 udf_update_tag(epos->bh->b_data,
1868 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1870 mark_buffer_dirty_inode(epos->bh, inode);
1873 mark_inode_dirty(inode);
1876 epos->offset += adsize;
1877 return (elen >> 30);
1880 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1881 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1885 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1886 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1888 epos->block = *eloc;
1889 epos->offset = sizeof(struct allocExtDesc);
1891 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1893 udf_debug("reading block %d failed!\n",
1894 udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1902 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1903 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1912 epos->offset = udf_file_entry_alloc_offset(inode);
1913 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1914 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1919 epos->offset = sizeof(struct allocExtDesc);
1920 ptr = epos->bh->b_data + epos->offset;
1921 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1924 switch (UDF_I_ALLOCTYPE(inode))
1926 case ICBTAG_FLAG_AD_SHORT:
1930 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1933 etype = le32_to_cpu(sad->extLength) >> 30;
1934 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1935 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1936 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1939 case ICBTAG_FLAG_AD_LONG:
1943 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1946 etype = le32_to_cpu(lad->extLength) >> 30;
1947 *eloc = lelb_to_cpu(lad->extLocation);
1948 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1953 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1962 udf_insert_aext(struct inode *inode, struct extent_position epos,
1963 kernel_lb_addr neloc, uint32_t nelen)
1965 kernel_lb_addr oeloc;
1972 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
1974 udf_write_aext(inode, &epos, neloc, nelen, 1);
1977 nelen = (etype << 30) | oelen;
1979 udf_add_aext(inode, &epos, neloc, nelen, 1);
1981 return (nelen >> 30);
1984 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1985 kernel_lb_addr eloc, uint32_t elen)
1987 struct extent_position oepos;
1990 struct allocExtDesc *aed;
1998 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1999 adsize = sizeof(short_ad);
2000 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2001 adsize = sizeof(long_ad);
2006 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2009 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
2011 udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
2012 if (oepos.bh != epos.bh)
2014 oepos.block = epos.block;
2018 oepos.offset = epos.offset - adsize;
2021 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
2024 if (epos.bh != oepos.bh)
2026 udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
2027 udf_write_aext(inode, &oepos, eloc, elen, 1);
2028 udf_write_aext(inode, &oepos, eloc, elen, 1);
2031 UDF_I_LENALLOC(inode) -= (adsize * 2);
2032 mark_inode_dirty(inode);
2036 aed = (struct allocExtDesc *)oepos.bh->b_data;
2037 aed->lengthAllocDescs =
2038 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2039 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2040 udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
2042 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2043 mark_buffer_dirty_inode(oepos.bh, inode);
2048 udf_write_aext(inode, &oepos, eloc, elen, 1);
2051 UDF_I_LENALLOC(inode) -= adsize;
2052 mark_inode_dirty(inode);
2056 aed = (struct allocExtDesc *)oepos.bh->b_data;
2057 aed->lengthAllocDescs =
2058 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2059 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2060 udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2062 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2063 mark_buffer_dirty_inode(oepos.bh, inode);
2069 return (elen >> 30);
2072 int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
2073 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
2075 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
2080 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2085 pos->block = UDF_I_LOCATION(inode);
2091 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
2093 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2094 UDF_I_LENEXTENTS(inode) = lbcount;
2098 } while (lbcount <= bcount);
2100 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
2105 long udf_block_map(struct inode *inode, sector_t block)
2107 kernel_lb_addr eloc;
2110 struct extent_position epos = { NULL, 0, { 0, 0}};
2115 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2116 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2123 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2124 return udf_fixed_to_variable(ret);