/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif

static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_file_read,
        .write          = affs_file_write,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
        .fsync          = file_fsync,
        .sendfile       = generic_file_sendfile,
};

struct inode_operations affs_file_inode_operations = {
        .truncate       = affs_truncate,
        .setattr        = affs_notify_change,
};

static int
affs_file_open(struct inode *inode, struct file *filp)
{
        if (atomic_read(&filp->f_count) != 1)
                return 0;
        pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
        AFFS_I(inode)->i_opencnt++;
        return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
        if (atomic_read(&filp->f_count) != 0)
                return 0;
        pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
        AFFS_I(inode)->i_opencnt--;
        if (!AFFS_I(inode)->i_opencnt)
                affs_free_prealloc(inode);

        return 0;
}

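/*
 * Make sure the linear cache of extended block keys covers index @lc_idx.
 * Both caches (linear and associative) live on a single zeroed page that
 * is allocated on first use.  If the file has grown beyond what the
 * current stride can index, i_lc_shift is increased and the existing
 * entries compacted; the chain of extended blocks is then walked to fill
 * the linear cache up to the requested index.
 */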
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
        struct super_block      *sb = inode->i_sb;
        struct buffer_head      *bh;
        u32 lc_max;
        int i, j, key;

        if (!AFFS_I(inode)->i_lc) {
                char *ptr = (char *)get_zeroed_page(GFP_NOFS);
                if (!ptr)
                        return -ENOMEM;
                AFFS_I(inode)->i_lc = (u32 *)ptr;
                AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
        }

        lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

        if (AFFS_I(inode)->i_extcnt > lc_max) {
                u32 lc_shift, lc_mask, tmp, off;

                /* need to recalculate linear cache, start from old size */
                lc_shift = AFFS_I(inode)->i_lc_shift;
                tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
                for (; tmp; tmp >>= 1)
                        lc_shift++;
                lc_mask = (1 << lc_shift) - 1;

                /* fix idx and old size to new shift */
                lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
                AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

                /* first shrink old cache to make more space */
                off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
                for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
                        AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

                AFFS_I(inode)->i_lc_shift = lc_shift;
                AFFS_I(inode)->i_lc_mask = lc_mask;
        }

        /* fill cache to the needed index */
        i = AFFS_I(inode)->i_lc_size;
        AFFS_I(inode)->i_lc_size = lc_idx + 1;
        for (; i <= lc_idx; i++) {
                if (!i) {
                        AFFS_I(inode)->i_lc[0] = inode->i_ino;
                        continue;
                }
                key = AFFS_I(inode)->i_lc[i - 1];
                j = AFFS_I(inode)->i_lc_mask + 1;
                // unlock cache
                for (; j > 0; j--) {
                        bh = affs_bread(sb, key);
                        if (!bh)
                                goto err;
                        key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                        affs_brelse(bh);
                }
                // lock cache
                AFFS_I(inode)->i_lc[i] = key;
        }

        return 0;

err:
        // lock cache
        return -EIO;
}

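/*
 * Allocate and initialize a new extended block for @inode.  The block is
 * allocated close to the previous extended block @bh, set up as a T_LIST
 * block belonging to the file, and linked in via the ->extension field
 * of @bh.  Checksums are fixed up and i_extcnt is bumped.
 */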
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh;
        u32 blocknr, tmp;

        blocknr = affs_alloc_block(inode, bh->b_blocknr);
        if (!blocknr)
                return ERR_PTR(-ENOSPC);

        new_bh = affs_getzeroblk(sb, blocknr);
        if (!new_bh) {
                affs_free_block(sb, blocknr);
                return ERR_PTR(-EIO);
        }

        AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
        AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
        AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
        AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
        affs_fix_checksum(sb, new_bh);

        mark_buffer_dirty_inode(new_bh, inode);

        tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
        if (tmp)
                affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
        AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
        affs_adjust_checksum(bh, blocknr - tmp);
        mark_buffer_dirty_inode(bh, inode);

        AFFS_I(inode)->i_extcnt++;
        mark_inode_dirty(inode);

        return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
        /* inline the simplest case: same extended block as last time */
        struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
        if (ext == AFFS_I(inode)->i_ext_last)
                atomic_inc(&bh->b_count);
        else
                /* we have to do more (not inlined) */
                bh = affs_get_extblock_slow(inode, ext);

        return bh;
}

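/*
 * Look up (or allocate) extended block number @ext of @inode when it is
 * not the one cached in i_ext_bh.  Sequential access follows the
 * ->extension pointer of the current block; an access one past the end
 * allocates a new extended block; random access is resolved through the
 * linear and associative caches, falling back to walking the chain of
 * extended blocks.  The result replaces the cached i_ext_bh/i_ext_last.
 */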
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 ext_key;
        u32 lc_idx, lc_off, ac_idx;
        u32 tmp, idx;

        if (ext == AFFS_I(inode)->i_ext_last + 1) {
                /* read the next extended block from the current one */
                bh = AFFS_I(inode)->i_ext_bh;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                if (ext < AFFS_I(inode)->i_extcnt)
                        goto read_ext;
                if (ext > AFFS_I(inode)->i_extcnt)
                        BUG();
                bh = affs_alloc_extblock(inode, bh, ext);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

        if (ext == 0) {
                /* we seek back to the file header block */
                ext_key = inode->i_ino;
                goto read_ext;
        }

        if (ext >= AFFS_I(inode)->i_extcnt) {
                struct buffer_head *prev_bh;

                /* allocate a new extended block */
                if (ext > AFFS_I(inode)->i_extcnt)
                        BUG();

                /* get previous extended block */
                prev_bh = affs_get_extblock(inode, ext - 1);
                if (IS_ERR(prev_bh))
                        return prev_bh;
                bh = affs_alloc_extblock(inode, prev_bh, ext);
                affs_brelse(prev_bh);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

again:
        /* check if there is an extended cache and whether it's large enough */
        lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
        lc_off = ext & AFFS_I(inode)->i_lc_mask;

        if (lc_idx >= AFFS_I(inode)->i_lc_size) {
                int err;

                err = affs_grow_extcache(inode, lc_idx);
                if (err)
                        return ERR_PTR(err);
                goto again;
        }

        /* every n'th key we find in the linear cache */
        if (!lc_off) {
                ext_key = AFFS_I(inode)->i_lc[lc_idx];
                goto read_ext;
        }

        /* maybe it's still in the associative cache */
        ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
        if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
                ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
                goto read_ext;
        }

        /* try to find one of the previous extended blocks */
        tmp = ext;
        idx = ac_idx;
        while (--tmp, --lc_off > 0) {
                idx = (idx - 1) & AFFS_AC_MASK;
                if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
                        ext_key = AFFS_I(inode)->i_ac[idx].key;
                        goto find_ext;
                }
        }

        /* fall back to the linear cache */
        ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
        /* read all extended blocks until we find the one we need */
        //unlock cache
        do {
                bh = affs_bread(sb, ext_key);
                if (!bh)
                        goto err_bread;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                affs_brelse(bh);
                tmp++;
        } while (tmp < ext);
        //lock cache

        /* store it in the associative cache */
        // recalculate ac_idx?
        AFFS_I(inode)->i_ac[ac_idx].ext = ext;
        AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
        /* finally read the right extended block */
        //unlock cache
        bh = affs_bread(sb, ext_key);
        if (!bh)
                goto err_bread;
        //lock cache

store_ext:
        /* release old cached extended block and store the new one */
        affs_brelse(AFFS_I(inode)->i_ext_bh);
        AFFS_I(inode)->i_ext_last = ext;
        AFFS_I(inode)->i_ext_bh = bh;
        atomic_inc(&bh->b_count);

        return bh;

err_bread:
        affs_brelse(bh);
        return ERR_PTR(-EIO);
}

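/*
 * get_block() callback for the generic block_* helpers.  The logical
 * block number is split into an extended block index and an offset into
 * that block's table of data block keys; with @create set, a new data
 * block is allocated and its key stored in the table (and, for block 0,
 * in the header's ->first_data field), adjusting checksums as needed.
 */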
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
        struct super_block      *sb = inode->i_sb;
        struct buffer_head      *ext_bh;
        u32                      ext;

        pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

        if (block > (sector_t)0x7fffffffUL)
                BUG();

        if (block >= AFFS_I(inode)->i_blkcnt) {
                if (block > AFFS_I(inode)->i_blkcnt || !create)
                        goto err_big;
        } else
                create = 0;

        //lock cache
        affs_lock_ext(inode);

        ext = (u32)block / AFFS_SB(sb)->s_hashsize;
        block -= ext * AFFS_SB(sb)->s_hashsize;
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh))
                goto err_ext;
        map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

        if (create) {
                u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
                if (!blocknr)
                        goto err_alloc;
                set_buffer_new(bh_result);
                AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
                AFFS_I(inode)->i_blkcnt++;

                /* store new block */
                if (bh_result->b_blocknr)
                        affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
                AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
                AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
                affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
                bh_result->b_blocknr = blocknr;

                if (!block) {
                        /* insert first block into header block */
                        u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
                        if (tmp)
                                affs_warning(sb, "get_block", "first block already set (%d)", tmp);
                        AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
                        affs_adjust_checksum(ext_bh, blocknr - tmp);
                }
        }

        affs_brelse(ext_bh);
        //unlock cache
        affs_unlock_ext(inode);
        return 0;

err_big:
        affs_error(inode->i_sb, "get_block", "strange block request %d", block);
        return -EIO;
err_ext:
        // unlock cache
        affs_unlock_ext(inode);
        return PTR_ERR(ext_bh);
err_alloc:
        brelse(ext_bh);
        clear_buffer_mapped(bh_result);
        bh_result->b_bdev = NULL;
        // unlock cache
        affs_unlock_ext(inode);
        return -ENOSPC;
}

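/* Address space operations for FFS files, all built on affs_get_block(). */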
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, affs_get_block, wbc);
}
static int affs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, affs_get_block);
}
static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return cont_prepare_write(page, from, to, affs_get_block,
                &AFFS_I(page->mapping->host)->mmu_private);
}
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, affs_get_block);
}
struct address_space_operations affs_aops = {
        .readpage = affs_readpage,
        .writepage = affs_writepage,
        .sync_page = block_sync_page,
        .prepare_write = affs_prepare_write,
        .commit_write = generic_commit_write,
        .bmap = _affs_bmap
};

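/*
 * Helpers that translate a logical file block to a device block via
 * affs_get_block() and then return the corresponding buffer_head
 * (read, zeroed or empty).  On failure an ERR_PTR is returned.
 */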
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, create);
        if (!err) {
                bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

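/*
 * Plain write through generic_file_write(); on success update the
 * inode's ctime/mtime and mark it dirty.
 */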
static ssize_t
affs_file_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
{
        ssize_t retval;

        retval = generic_file_write(file, buf, count, ppos);
        if (retval > 0) {
                struct inode *inode = file->f_dentry->d_inode;
                inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
                mark_inode_dirty(inode);
        }
        return retval;
}

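/*
 * Copy the byte range [@from, @to) of @page from the OFS data blocks.
 * OFS data blocks carry their own header, so the page cache cannot be
 * mapped directly onto disk blocks and the data is copied by hand.
 */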
static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        char *data;
        u32 bidx, boff, bsize;
        u32 tmp;

        pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
        if (from > to || to > PAGE_CACHE_SIZE)
                BUG();
        kmap(page);
        data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;

        while (from < to) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                if (from + tmp > to || tmp > bsize)
                        BUG();
                memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
                affs_brelse(bh);
                bidx++;
                from += tmp;
                boff = 0;
        }
        flush_dcache_page(page);
        kunmap(page);
        return 0;
}

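/*
 * Extend an OFS file to @newsize by zero-filling the partial last data
 * block and chaining new, zeroed data blocks behind it, keeping the
 * per-block size fields, ->next pointers and checksums consistent.
 */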
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        u32 bidx, boff;
        u32 size, bsize;
        u32 tmp;

        pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
        bsize = AFFS_SB(sb)->s_data_blksize;
        bh = NULL;
        size = AFFS_I(inode)->mmu_private;
        bidx = size / bsize;
        boff = size % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, newsize - size);
                if (boff + tmp > bsize || tmp > bsize)
                        BUG();
                memset(AFFS_DATA(bh) + boff, 0, tmp);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                size += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }

        while (size < newsize) {
                prev_bh = bh;
                bh = affs_getzeroblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, newsize - size);
                if (tmp > bsize)
                        BUG();
                AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_fix_checksum(sb, bh);
                bh->b_state &= ~(1UL << BH_New);
                mark_buffer_dirty_inode(bh, inode);
                if (prev_bh) {
                        u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                        if (tmp)
                                affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
                        AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                        affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                        mark_buffer_dirty_inode(prev_bh, inode);
                        affs_brelse(prev_bh);
                }
                size += bsize;
                bidx++;
        }
        affs_brelse(bh);
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return 0;

out:
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return PTR_ERR(bh);
}

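/*
 * readpage() for OFS files: zero the part of the page beyond EOF and
 * copy the rest from the data blocks.
 */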
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        u32 to;
        int err;

        pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
        to = PAGE_CACHE_SIZE;
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
                to = inode->i_size & ~PAGE_CACHE_MASK;
                memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
        }

        err = affs_do_readpage_ofs(file, page, 0, to);
        if (!err)
                SetPageUptodate(page);
        unlock_page(page);
        return err;
}

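/*
 * prepare_write() for OFS files: extend the file if the write starts
 * beyond the current on-disk size, then make sure the parts of the page
 * outside [@from, @to) contain valid data (read in or zeroed).
 */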
static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        u32 size, offset;
        u32 tmp;
        int err = 0;

        pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
        offset = page->index << PAGE_CACHE_SHIFT;
        if (offset + from > AFFS_I(inode)->mmu_private) {
                err = affs_extent_file_ofs(inode, offset + from);
                if (err)
                        return err;
        }
        size = inode->i_size;

        if (PageUptodate(page))
                return 0;

        if (from) {
                err = affs_do_readpage_ofs(file, page, 0, from);
                if (err)
                        return err;
        }
        if (to < PAGE_CACHE_SIZE) {
                char *kaddr = kmap_atomic(page, KM_USER0);

                memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                if (size > offset + to) {
                        if (size < offset + PAGE_CACHE_SIZE)
                                tmp = size & ~PAGE_CACHE_MASK;
                        else
                                tmp = PAGE_CACHE_SIZE;
                        err = affs_do_readpage_ofs(file, page, to, tmp);
                }
        }
        return err;
}

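/*
 * commit_write() for OFS files: copy the dirty range of the page into
 * the OFS data blocks, allocating and linking new blocks as needed, and
 * advance i_size/mmu_private past the bytes written.
 */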
static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        char *data;
        u32 bidx, boff, bsize;
        u32 tmp;
        int written;

        pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
        bsize = AFFS_SB(sb)->s_data_blksize;
        data = page_address(page);

        bh = NULL;
        written = 0;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                if (boff + tmp > bsize || tmp > bsize)
                        BUG();
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }
        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                }
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += bsize;
                from += bsize;
                bidx++;
        }
        if (from < to) {
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, to - from);
                if (tmp > bsize)
                        BUG();
                memcpy(AFFS_DATA(bh), data + from, tmp);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        }
        SetPageUptodate(page);

done:
        affs_brelse(bh);
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;

        return written;

out:
        bh = prev_bh;
        if (!written)
                written = PTR_ERR(bh);
        goto done;
}

struct address_space_operations affs_aops_ofs = {
        .readpage = affs_readpage_ofs,
        //.writepage = affs_writepage_ofs,
        //.sync_page = affs_sync_page_ofs,
        .prepare_write = affs_prepare_write_ofs,
        .commit_write = affs_commit_write_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

        while (AFFS_I(inode)->i_pa_cnt) {
                AFFS_I(inode)->i_pa_cnt--;
                affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
        }
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        u32 ext, ext_key;
        u32 last_blk, blkcnt, blk;
        u32 size;
        struct buffer_head *ext_bh;
        int i;

        pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
                 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

        last_blk = 0;
        ext = 0;
        if (inode->i_size) {
                last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
                ext = last_blk / AFFS_SB(sb)->s_hashsize;
        }

        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                u32 size = inode->i_size - 1;
                int res;

                page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
                if (!page)
                        return;
                size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
                res = mapping->a_ops->prepare_write(NULL, page, size, size);
                if (!res)
                        res = mapping->a_ops->commit_write(NULL, page, size, size);
                unlock_page(page);
                page_cache_release(page);
                mark_inode_dirty(inode);
                return;
        } else if (inode->i_size == AFFS_I(inode)->mmu_private)
                return;

        // lock cache
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh)) {
                affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
                             ext, PTR_ERR(ext_bh));
                return;
        }
        if (AFFS_I(inode)->i_lc) {
                /* clear linear cache */
                i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
                if (AFFS_I(inode)->i_lc_size > i) {
                        AFFS_I(inode)->i_lc_size = i;
                        for (; i < AFFS_LC_SIZE; i++)
                                AFFS_I(inode)->i_lc[i] = 0;
                }
                /* clear associative cache */
                for (i = 0; i < AFFS_AC_SIZE; i++)
                        if (AFFS_I(inode)->i_ac[i].ext >= ext)
                                AFFS_I(inode)->i_ac[i].ext = 0;
        }
        ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

        blkcnt = AFFS_I(inode)->i_blkcnt;
        i = 0;
        blk = last_blk;
        if (inode->i_size) {
                i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
                blk++;
        } else
                AFFS_HEAD(ext_bh)->first_data = 0;
        size = AFFS_SB(sb)->s_hashsize;
        if (size > blkcnt - blk + i)
                size = blkcnt - blk + i;
        for (; i < size; i++, blk++) {
                affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                AFFS_BLOCK(sb, ext_bh, i) = 0;
        }
        AFFS_TAIL(sb, ext_bh)->extension = 0;
        affs_fix_checksum(sb, ext_bh);
        mark_buffer_dirty_inode(ext_bh, inode);
        affs_brelse(ext_bh);

        if (inode->i_size) {
                AFFS_I(inode)->i_blkcnt = last_blk + 1;
                AFFS_I(inode)->i_extcnt = ext + 1;
                if (AFFS_SB(sb)->s_flags & SF_OFS) {
                        struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
                        u32 tmp;
                        if (IS_ERR(bh)) {
                                affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
                                             ext, PTR_ERR(bh));
                                return;
                        }
                        tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        affs_adjust_checksum(bh, -tmp);
                        affs_brelse(bh);
                }
        } else {
                AFFS_I(inode)->i_blkcnt = 0;
                AFFS_I(inode)->i_extcnt = 1;
        }
        AFFS_I(inode)->mmu_private = inode->i_size;
        // unlock cache

        while (ext_key) {
                ext_bh = affs_bread(sb, ext_key);
                size = AFFS_SB(sb)->s_hashsize;
                if (size > blkcnt - blk)
                        size = blkcnt - blk;
                for (i = 0; i < size; i++, blk++)
                        affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                affs_free_block(sb, ext_key);
                ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
                affs_brelse(ext_bh);
        }
        affs_free_prealloc(inode);
}