NTFS: Fix serious data corruption issue when writing.
fs/ntfs/file.c
/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2005 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:         inode to be opened
 * @filp:       file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we do not run into problems with
 * existing files that are too large. It would be better to allow the user to
 * read the beginning of the file but I doubt very much anyone is going to hit
 * this check on a 32-bit architecture, so there is no point in adding the
 * extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
        if (sizeof(unsigned long) < 8) {
                if (i_size_read(vi) > MAX_LFS_FILESIZE)
                        return -EFBIG;
        }
        return generic_file_open(vi, filp);
}
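
/*
 * For reference, a sketch of the limit tested above, assuming the 2.6-era
 * definitions from include/linux/fs.h (shown for context only, not part of
 * this file):
 *
 *      #if BITS_PER_LONG == 32
 *      #define MAX_LFS_FILESIZE  (((u64)PAGE_CACHE_SIZE << \
 *                                 (BITS_PER_LONG - 1)) - 1)
 *      #else
 *      #define MAX_LFS_FILESIZE  0x7fffffffffffffffUL
 *      #endif
 *
 * With 4KiB pages on 32-bit this is 2^43 - 1 bytes, i.e. just under 8TiB,
 * the largest offset the 32-bit page cache page index can address.
 */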

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:                 ntfs inode of the attribute to extend
 * @new_init_size:      requested new initialized size in bytes
 * @cached_page:        store any allocated but unused page here
 * @lru_pvec:           lru-buffering pagevec of the caller
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes.  This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are zeroed in the page cache then these may
 * simply be marked dirty for later writeout).  One caveat: if any uptodate
 * page cache pages between the old initialized size and the smaller of
 * @new_init_size and the file size (vfs inode->i_size) are in memory, these
 * need to be marked dirty without being zeroed since they could be non-zero
 * due to mmap() based writes.
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.  This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all.  For a more detailed explanation see ntfs_truncate() which
 * is in fs/ntfs/inode.c.
 *
 * @cached_page and @lru_pvec are just optimisations for dealing with multiple
 * pages.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: This function locks the mft record of the base ntfs inode and
 * maintains the lock throughout execution of the function.  This is required
 * so that the initialized size of the attribute can be modified safely.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
                struct page **cached_page, struct pagevec *lru_pvec)
{
        s64 old_init_size;
        loff_t old_i_size;
        pgoff_t index, end_index;
        unsigned long flags;
        struct inode *vi = VFS_I(ni);
        ntfs_inode *base_ni;
        MFT_RECORD *m = NULL;
        ATTR_RECORD *a;
        ntfs_attr_search_ctx *ctx = NULL;
        struct address_space *mapping;
        struct page *page = NULL;
        u8 *kattr;
        int err;
        u32 attr_len;

        read_lock_irqsave(&ni->size_lock, flags);
        old_init_size = ni->initialized_size;
        old_i_size = i_size_read(vi);
        BUG_ON(new_init_size > ni->allocated_size);
        read_unlock_irqrestore(&ni->size_lock, flags);
        ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
                        "old_initialized_size 0x%llx, "
                        "new_initialized_size 0x%llx, i_size 0x%llx.",
                        vi->i_ino, (unsigned)le32_to_cpu(ni->type),
                        (unsigned long long)old_init_size,
                        (unsigned long long)new_init_size, old_i_size);
        if (!NInoAttr(ni))
                base_ni = ni;
        else
                base_ni = ni->ext.base_ntfs_ino;
        /* Use goto to reduce indentation and we need the label below anyway. */
        if (NInoNonResident(ni))
                goto do_non_resident_extend;
        BUG_ON(old_init_size != old_i_size);
        m = map_mft_record(base_ni);
        if (IS_ERR(m)) {
                err = PTR_ERR(m);
                m = NULL;
                goto err_out;
        }
        ctx = ntfs_attr_get_search_ctx(base_ni, m);
        if (unlikely(!ctx)) {
                err = -ENOMEM;
                goto err_out;
        }
        err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
                        CASE_SENSITIVE, 0, NULL, 0, ctx);
        if (unlikely(err)) {
                if (err == -ENOENT)
                        err = -EIO;
                goto err_out;
        }
        m = ctx->mrec;
        a = ctx->attr;
        BUG_ON(a->non_resident);
        /* The total length of the attribute value. */
        attr_len = le32_to_cpu(a->data.resident.value_length);
        BUG_ON(old_i_size != (loff_t)attr_len);
        /*
         * Do the zeroing in the mft record and update the attribute size in
         * the mft record.
         */
        kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
        memset(kattr + attr_len, 0, new_init_size - attr_len);
        a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
        /* Finally, update the sizes in the vfs and ntfs inodes. */
        write_lock_irqsave(&ni->size_lock, flags);
        i_size_write(vi, new_init_size);
        ni->initialized_size = new_init_size;
        write_unlock_irqrestore(&ni->size_lock, flags);
        goto done;
do_non_resident_extend:
        /*
         * If the new initialized size @new_init_size exceeds the current file
         * size (vfs inode->i_size), we need to extend the file size to the
         * new initialized size.
         */
        if (new_init_size > old_i_size) {
                m = map_mft_record(base_ni);
                if (IS_ERR(m)) {
                        err = PTR_ERR(m);
                        m = NULL;
                        goto err_out;
                }
                ctx = ntfs_attr_get_search_ctx(base_ni, m);
                if (unlikely(!ctx)) {
                        err = -ENOMEM;
                        goto err_out;
                }
                err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
                                CASE_SENSITIVE, 0, NULL, 0, ctx);
                if (unlikely(err)) {
                        if (err == -ENOENT)
                                err = -EIO;
                        goto err_out;
                }
                m = ctx->mrec;
                a = ctx->attr;
                BUG_ON(!a->non_resident);
                BUG_ON(old_i_size != (loff_t)
                                sle64_to_cpu(a->data.non_resident.data_size));
                a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
                flush_dcache_mft_record_page(ctx->ntfs_ino);
                mark_mft_record_dirty(ctx->ntfs_ino);
                /* Update the file size in the vfs inode. */
                i_size_write(vi, new_init_size);
                ntfs_attr_put_search_ctx(ctx);
                ctx = NULL;
                unmap_mft_record(base_ni);
                m = NULL;
        }
        mapping = vi->i_mapping;
        index = old_init_size >> PAGE_CACHE_SHIFT;
        end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        do {
                /*
                 * Read the page.  If the page is not present, this will zero
                 * the uninitialized regions for us.
                 */
                page = read_cache_page(mapping, index,
                                (filler_t*)mapping->a_ops->readpage, NULL);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto init_err_out;
                }
                wait_on_page_locked(page);
                if (unlikely(!PageUptodate(page) || PageError(page))) {
                        page_cache_release(page);
                        err = -EIO;
                        goto init_err_out;
                }
                /*
                 * Update the initialized size in the ntfs inode.  This is
                 * enough to make ntfs_writepage() work.
                 */
                write_lock_irqsave(&ni->size_lock, flags);
                ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
                if (ni->initialized_size > new_init_size)
                        ni->initialized_size = new_init_size;
                write_unlock_irqrestore(&ni->size_lock, flags);
                /* Set the page dirty so it gets written out. */
                set_page_dirty(page);
                page_cache_release(page);
                /*
                 * Play nice with the vm and the rest of the system.  This is
                 * very much needed as we can potentially be modifying the
                 * initialised size from a very small value to a really huge
                 * value, e.g.
                 *      f = open(somefile, O_TRUNC);
                 *      truncate(f, 10GiB);
                 *      seek(f, 10GiB);
                 *      write(f, 1);
                 * And this would mean we would be marking dirty hundreds of
                 * thousands of pages or as in the above example more than
                 * two and a half million pages!
                 *
                 * TODO: For sparse pages could optimize this workload by using
                 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
                 * would be set in readpage for sparse pages and here we would
                 * not need to mark dirty any pages which have this bit set.
                 * The only caveat is that we have to clear the bit everywhere
                 * where we allocate any clusters that lie in the page or that
                 * contain the page.
                 *
                 * TODO: An even greater optimization would be for us to only
                 * call readpage() on pages which are not in sparse regions as
                 * determined from the runlist.  This would greatly reduce the
                 * number of pages we read and make dirty in the case of sparse
                 * files.
                 */
                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
        } while (++index < end_index);
        read_lock_irqsave(&ni->size_lock, flags);
        BUG_ON(ni->initialized_size != new_init_size);
        read_unlock_irqrestore(&ni->size_lock, flags);
        /* Now bring in sync the initialized_size in the mft record. */
        m = map_mft_record(base_ni);
        if (IS_ERR(m)) {
                err = PTR_ERR(m);
                m = NULL;
                goto init_err_out;
        }
        ctx = ntfs_attr_get_search_ctx(base_ni, m);
        if (unlikely(!ctx)) {
                err = -ENOMEM;
                goto init_err_out;
        }
        err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
                        CASE_SENSITIVE, 0, NULL, 0, ctx);
        if (unlikely(err)) {
                if (err == -ENOENT)
                        err = -EIO;
                goto init_err_out;
        }
        m = ctx->mrec;
        a = ctx->attr;
        BUG_ON(!a->non_resident);
        a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
        flush_dcache_mft_record_page(ctx->ntfs_ino);
        mark_mft_record_dirty(ctx->ntfs_ino);
        if (ctx)
                ntfs_attr_put_search_ctx(ctx);
        if (m)
                unmap_mft_record(base_ni);
        ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
                        (unsigned long long)new_init_size, i_size_read(vi));
        return 0;
init_err_out:
        write_lock_irqsave(&ni->size_lock, flags);
        ni->initialized_size = old_init_size;
        write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
        if (ctx)
                ntfs_attr_put_search_ctx(ctx);
        if (m)
                unmap_mft_record(base_ni);
        ntfs_debug("Failed.  Returning error code %i.", err);
        return err;
}
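
/*
 * Illustration (hypothetical sizes): consider a non-resident attribute with
 * i_size 0x3000 and initialized_size 0x1000 on a volume with 4KiB pages.  A
 * call with @new_init_size == 0x2800 reads in (and thus zeroes) the two page
 * cache pages covering bytes 0x1000-0x2fff, marks them dirty, advances
 * ni->initialized_size page by page (clamping it to 0x2800), and finally
 * writes the new initialized size of 0x2800 into the mft record.  i_size is
 * not touched since 0x2800 <= 0x3000.
 */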

/**
 * ntfs_fault_in_pages_readable - fault a number of userspace pages into pagetables
 * @uaddr:      start of the user buffer to fault in
 * @bytes:      length of the user buffer in bytes
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *      do {
 *              ret = __get_user(c, uaddr);
 *              uaddr += PAGE_SIZE;
 *      } while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
                int bytes)
{
        const char __user *end;
        volatile char c;

        /* Set @end to the first byte outside the last page we care about. */
        end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);

        while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
                ;
}
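
/*
 * A sketch of the intended usage pattern (identifiers illustrative): fault
 * the user buffer in while no page locks are held, so that a subsequent
 * atomic copy cannot sleep on a page fault:
 *
 *      ntfs_fault_in_pages_readable(buf, bytes);
 *      kaddr = kmap_atomic(page, KM_USER0);
 *      left = __copy_from_user_inatomic(kaddr + ofs, buf, bytes);
 *      kunmap_atomic(kaddr, KM_USER0);
 *
 * If @left is non-zero despite the fault-in, the caller would typically
 * retry the copy via the non-atomic, sleeping copy_from_user() path.
 */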

/**
 * ntfs_fault_in_pages_readable_iovec - fault in userspace pages from an iovec array
 * @iov:        array of iovecs describing the user buffer
 * @iov_ofs:    offset into the first iovec at which the buffer starts
 * @bytes:      number of bytes to fault in
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
                size_t iov_ofs, int bytes)
{
        do {
                const char __user *buf;
                unsigned len;

                buf = iov->iov_base + iov_ofs;
                len = iov->iov_len - iov_ofs;
                if (len > bytes)
                        len = bytes;
                ntfs_fault_in_pages_readable(buf, len);
                bytes -= len;
                iov++;
                iov_ofs = 0;
        } while (bytes);
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:    address space mapping from which to obtain page cache pages
 * @index:      starting index in @mapping at which to begin obtaining pages
 * @nr_pages:   number of page cache pages to obtain
 * @pages:      array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 * @lru_pvec:   lru-buffering pagevec of caller
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec @lru_pvec.
 *
 * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
 * are obtained at once instead of just one page and that 0 is returned on
 * success and -errno on error.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                pgoff_t index, const unsigned nr_pages, struct page **pages,
                struct page **cached_page, struct pagevec *lru_pvec)
{
        int err, nr;

        BUG_ON(!nr_pages);
        err = nr = 0;
        do {
                pages[nr] = find_lock_page(mapping, index);
                if (!pages[nr]) {
                        if (!*cached_page) {
                                *cached_page = page_cache_alloc(mapping);
                                if (unlikely(!*cached_page)) {
                                        err = -ENOMEM;
                                        goto err_out;
                                }
                        }
                        err = add_to_page_cache(*cached_page, mapping, index,
                                        GFP_KERNEL);
                        if (unlikely(err)) {
                                if (err == -EEXIST)
                                        continue;
                                goto err_out;
                        }
                        pages[nr] = *cached_page;
                        page_cache_get(*cached_page);
                        if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
                                __pagevec_lru_add(lru_pvec);
                        *cached_page = NULL;
                }
                index++;
                nr++;
        } while (nr < nr_pages);
out:
        return err;
err_out:
        while (nr > 0) {
                unlock_page(pages[--nr]);
                page_cache_release(pages[nr]);
        }
        goto out;
}
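
/*
 * A minimal usage sketch (identifiers illustrative): grab the pages spanning
 * one write, copy the data in, then release the pages and flush the
 * lru-buffering pagevec:
 *
 *      struct page *pages[2], *cached_page = NULL;
 *      struct pagevec lru_pvec;
 *
 *      pagevec_init(&lru_pvec, 0);
 *      err = __ntfs_grab_cache_pages(mapping, index, 2, pages,
 *                      &cached_page, &lru_pvec);
 *      ... copy data, then unlock_page() and page_cache_release() each ...
 *      if (cached_page)
 *              page_cache_release(cached_page);
 *      pagevec_lru_add(&lru_pvec);
 */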

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        return submit_bh(READ, bh);
}
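
/*
 * The matching completion side, as used further below: submitted buffer
 * heads are collected in a small array and waited on once all reads have
 * been issued:
 *
 *      while (wait_bh > wait) {
 *              bh = *--wait_bh;
 *              wait_on_buffer(bh);
 *              if (!buffer_uptodate(bh))
 *                      err = -EIO;
 *      }
 */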

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:      array of destination pages
 * @nr_pages:   number of pages in @pages
 * @pos:        byte position in file at which the write begins
 * @bytes:      number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_sem held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                unsigned nr_pages, s64 pos, size_t bytes)
{
        VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
        LCN lcn;
        s64 bh_pos, vcn_len, end, initialized_size;
        sector_t lcn_block;
        struct page *page;
        struct inode *vi;
        ntfs_inode *ni, *base_ni = NULL;
        ntfs_volume *vol;
        runlist_element *rl, *rl2;
        struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
        ntfs_attr_search_ctx *ctx = NULL;
        MFT_RECORD *m = NULL;
        ATTR_RECORD *a = NULL;
        unsigned long flags;
        u32 attr_rec_len = 0;
        unsigned blocksize, u;
        int err, mp_size;
        BOOL rl_write_locked, was_hole, is_retry;
        unsigned char blocksize_bits;
        struct {
                u8 runlist_merged:1;
                u8 mft_attr_mapped:1;
                u8 mp_rebuilt:1;
                u8 attr_switched:1;
        } status = { 0, 0, 0, 0 };

        BUG_ON(!nr_pages);
        BUG_ON(!pages);
        BUG_ON(!*pages);
        vi = pages[0]->mapping->host;
        ni = NTFS_I(vi);
        vol = ni->vol;
        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
                        "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%x.",
                        vi->i_ino, ni->type, pages[0]->index, nr_pages,
                        (long long)pos, bytes);
        blocksize_bits = vi->i_blkbits;
        blocksize = 1 << blocksize_bits;
        u = 0;
        do {
                struct page *page = pages[u];
                /*
                 * create_empty_buffers() will create uptodate/dirty buffers if
                 * the page is uptodate/dirty.
                 */
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, blocksize, 0);
                        if (unlikely(!page_has_buffers(page)))
                                return -ENOMEM;
                }
        } while (++u < nr_pages);
        rl_write_locked = FALSE;
        rl = NULL;
        err = 0;
        vcn = lcn = -1;
        vcn_len = 0;
        lcn_block = -1;
        was_hole = FALSE;
        cpos = pos >> vol->cluster_size_bits;
        end = pos + bytes;
        cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
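        /*
         * Example of the cluster arithmetic (hypothetical values): with 4KiB
         * clusters (cluster_size_bits == 12), a write of 0x3000 bytes at
         * pos == 0x1800 gives end == 0x4800, cpos == 1 (the first cluster
         * touched) and, due to the round-up, cend == 5 (one past the last
         * cluster touched).
         */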
        /*
         * Loop over each page and for each page over each buffer.  Use goto to
         * reduce indentation.
         */
        u = 0;
do_next_page:
        page = pages[u];
        bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
        bh = head = page_buffers(page);
        do {
                VCN cdelta;
                s64 bh_end;
                unsigned bh_cofs;

                /* Clear buffer_new on all buffers to reinitialise state. */
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                bh_end = bh_pos + blocksize;
                bh_cpos = bh_pos >> vol->cluster_size_bits;
                bh_cofs = bh_pos & vol->cluster_size_mask;
                if (buffer_mapped(bh)) {
                        /*
                         * The buffer is already mapped.  If it is uptodate,
                         * ignore it.
                         */
                        if (buffer_uptodate(bh))
                                continue;
                        /*
                         * The buffer is not uptodate.  If the page is uptodate
                         * set the buffer uptodate and otherwise ignore it.
                         */
                        if (PageUptodate(page)) {
                                set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
                         * Neither the page nor the buffer are uptodate.  If
                         * the buffer is only partially being written to, we
                         * need to read it in before the write, i.e. now.
                         */
                        if ((bh_pos < pos && bh_end > pos) ||
                                        (bh_pos < end && bh_end > end)) {
                                /*
                                 * If the buffer is fully or partially within
                                 * the initialized size, do an actual read.
                                 * Otherwise, simply zero the buffer.
                                 */
                                read_lock_irqsave(&ni->size_lock, flags);
                                initialized_size = ni->initialized_size;
                                read_unlock_irqrestore(&ni->size_lock, flags);
                                if (bh_pos < initialized_size) {
                                        ntfs_submit_bh_for_read(bh);
                                        *wait_bh++ = bh;
                                } else {
                                        u8 *kaddr = kmap_atomic(page, KM_USER0);
                                        memset(kaddr + bh_offset(bh), 0,
                                                        blocksize);
                                        kunmap_atomic(kaddr, KM_USER0);
                                        flush_dcache_page(page);
                                        set_buffer_uptodate(bh);
                                }
                        }
                        continue;
                }
                /* Unmapped buffer.  Need to map it. */
                bh->b_bdev = vol->sb->s_bdev;
                /*
                 * If the current buffer is in the same clusters as the map
                 * cache, there is no need to check the runlist again.  The
                 * map cache is made up of @vcn, which is the first cached file
                 * cluster, @vcn_len which is the number of cached file
                 * clusters, @lcn is the device cluster corresponding to @vcn,
                 * and @lcn_block is the block number corresponding to @lcn.
                 */
                cdelta = bh_cpos - vcn;
                if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
                        BUG_ON(lcn < 0);
                        bh->b_blocknr = lcn_block +
                                        (cdelta << (vol->cluster_size_bits -
                                        blocksize_bits)) +
                                        (bh_cofs >> blocksize_bits);
                        set_buffer_mapped(bh);
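                        /*
                         * Worked example (hypothetical values): with 4KiB
                         * clusters and 512-byte blocks there are eight blocks
                         * per cluster.  If the cache holds vcn 0x10, vcn_len 4
                         * and lcn 0x100 (so lcn_block == 0x800), a buffer at
                         * bh_cpos 0x12 with bh_cofs 0x200 has cdelta == 2 and
                         * maps to block 0x800 + (2 << 3) + (0x200 >> 9) ==
                         * 0x811.
                         */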
645                         /*
646                          * If the page is uptodate so is the buffer.  If the
647                          * buffer is fully outside the write, we ignore it if
648                          * it was already allocated and we mark it dirty so it
649                          * gets written out if we allocated it.  On the other
650                          * hand, if we allocated the buffer but we are not
651                          * marking it dirty we set buffer_new so we can do
652                          * error recovery.
653                          */
654                         if (PageUptodate(page)) {
655                                 if (!buffer_uptodate(bh))
656                                         set_buffer_uptodate(bh);
657                                 if (unlikely(was_hole)) {
658                                         /* We allocated the buffer. */
659                                         unmap_underlying_metadata(bh->b_bdev,
660                                                         bh->b_blocknr);
661                                         if (bh_end <= pos || bh_pos >= end)
662                                                 mark_buffer_dirty(bh);
663                                         else
664                                                 set_buffer_new(bh);
665                                 }
666                                 continue;
667                         }
668                         /* Page is _not_ uptodate. */
669                         if (likely(!was_hole)) {
670                                 /*
671                                  * Buffer was already allocated.  If it is not
672                                  * uptodate and is only partially being written
673                                  * to, we need to read it in before the write,
674                                  * i.e. now.
675                                  */
676                                 if (!buffer_uptodate(bh) && ((bh_pos < pos &&
677                                                 bh_end > pos) ||
678                                                 (bh_end > end &&
679                                                 bh_end > end))) {
680                                         /*
681                                          * If the buffer is fully or partially
682                                          * within the initialized size, do an
683                                          * actual read.  Otherwise, simply zero
684                                          * the buffer.
685                                          */
686                                         read_lock_irqsave(&ni->size_lock,
687                                                         flags);
688                                         initialized_size = ni->initialized_size;
689                                         read_unlock_irqrestore(&ni->size_lock,
690                                                         flags);
691                                         if (bh_pos < initialized_size) {
692                                                 ntfs_submit_bh_for_read(bh);
693                                                 *wait_bh++ = bh;
694                                         } else {
695                                                 u8 *kaddr = kmap_atomic(page,
696                                                                 KM_USER0);
697                                                 memset(kaddr + bh_offset(bh),
698                                                                 0, blocksize);
699                                                 kunmap_atomic(kaddr, KM_USER0);
700                                                 flush_dcache_page(page);
701                                                 set_buffer_uptodate(bh);
702                                         }
703                                 }
704                                 continue;
705                         }
706                         /* We allocated the buffer. */
707                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
708                         /*
709                          * If the buffer is fully outside the write, zero it,
710                          * set it uptodate, and mark it dirty so it gets
711                          * written out.  If it is partially being written to,
712                          * zero region surrounding the write but leave it to
713                          * commit write to do anything else.  Finally, if the
714                          * buffer is fully being overwritten, do nothing.
715                          */
716                         if (bh_end <= pos || bh_pos >= end) {
717                                 if (!buffer_uptodate(bh)) {
718                                         u8 *kaddr = kmap_atomic(page, KM_USER0);
719                                         memset(kaddr + bh_offset(bh), 0,
720                                                         blocksize);
721                                         kunmap_atomic(kaddr, KM_USER0);
722                                         flush_dcache_page(page);
723                                         set_buffer_uptodate(bh);
724                                 }
725                                 mark_buffer_dirty(bh);
726                                 continue;
727                         }
728                         set_buffer_new(bh);
729                         if (!buffer_uptodate(bh) &&
730                                         (bh_pos < pos || bh_end > end)) {
731                                 u8 *kaddr;
732                                 unsigned pofs;
733                                         
734                                 kaddr = kmap_atomic(page, KM_USER0);
735                                 if (bh_pos < pos) {
736                                         pofs = bh_pos & ~PAGE_CACHE_MASK;
737                                         memset(kaddr + pofs, 0, pos - bh_pos);
738                                 }
739                                 if (bh_end > end) {
740                                         pofs = end & ~PAGE_CACHE_MASK;
741                                         memset(kaddr + pofs, 0, bh_end - end);
742                                 }
743                                 kunmap_atomic(kaddr, KM_USER0);
744                                 flush_dcache_page(page);
745                         }
746                         continue;
747                 }
748                 /*
749                  * Slow path: this is the first buffer in the cluster.  If it
750                  * is outside allocated size and is not uptodate, zero it and
751                  * set it uptodate.
752                  */
753                 read_lock_irqsave(&ni->size_lock, flags);
754                 initialized_size = ni->allocated_size;
755                 read_unlock_irqrestore(&ni->size_lock, flags);
756                 if (bh_pos > initialized_size) {
757                         if (PageUptodate(page)) {
758                                 if (!buffer_uptodate(bh))
759                                         set_buffer_uptodate(bh);
760                         } else if (!buffer_uptodate(bh)) {
761                                 u8 *kaddr = kmap_atomic(page, KM_USER0);
762                                 memset(kaddr + bh_offset(bh), 0, blocksize);
763                                 kunmap_atomic(kaddr, KM_USER0);
764                                 flush_dcache_page(page);
765                                 set_buffer_uptodate(bh);
766                         }
767                         continue;
768                 }
769                 is_retry = FALSE;
770                 if (!rl) {
771                         down_read(&ni->runlist.lock);
772 retry_remap:
773                         rl = ni->runlist.rl;
774                 }
775                 if (likely(rl != NULL)) {
776                         /* Seek to element containing target cluster. */
777                         while (rl->length && rl[1].vcn <= bh_cpos)
778                                 rl++;
779                         lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
780                         if (likely(lcn >= 0)) {
781                                 /*
782                                  * Successful remap, setup the map cache and
783                                  * use that to deal with the buffer.
784                                  */
785                                 was_hole = FALSE;
786                                 vcn = bh_cpos;
787                                 vcn_len = rl[1].vcn - vcn;
788                                 lcn_block = lcn << (vol->cluster_size_bits -
789                                                 blocksize_bits);
790                                 cdelta = 0;
791                                 /*
792                                  * If the number of remaining clusters in the
793                                  * @pages is smaller or equal to the number of
794                                  * cached clusters, unlock the runlist as the
795                                  * map cache will be used from now on.
796                                  */
797                                 if (likely(vcn + vcn_len >= cend)) {
798                                         if (rl_write_locked) {
799                                                 up_write(&ni->runlist.lock);
800                                                 rl_write_locked = FALSE;
801                                         } else
802                                                 up_read(&ni->runlist.lock);
803                                         rl = NULL;
804                                 }
805                                 goto map_buffer_cached;
806                         }
807                 } else
808                         lcn = LCN_RL_NOT_MAPPED;
809                 /*
810                  * If it is not a hole and not out of bounds, the runlist is
811                  * probably unmapped so try to map it now.
812                  */
813                 if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
814                         if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
815                                 /* Attempt to map runlist. */
816                                 if (!rl_write_locked) {
817                                         /*
818                                          * We need the runlist locked for
819                                          * writing, so if it is locked for
820                                          * reading relock it now and retry in
821                                          * case it changed whilst we dropped
822                                          * the lock.
823                                          */
824                                         up_read(&ni->runlist.lock);
825                                         down_write(&ni->runlist.lock);
826                                         rl_write_locked = TRUE;
827                                         goto retry_remap;
828                                 }
829                                 err = ntfs_map_runlist_nolock(ni, bh_cpos,
830                                                 NULL);
831                                 if (likely(!err)) {
832                                         is_retry = TRUE;
833                                         goto retry_remap;
834                                 }
835                                 /*
836                                  * If @vcn is out of bounds, pretend @lcn is
837                                  * LCN_ENOENT.  As long as the buffer is out
838                                  * of bounds this will work fine.
839                                  */
840                                 if (err == -ENOENT) {
841                                         lcn = LCN_ENOENT;
842                                         err = 0;
843                                         goto rl_not_mapped_enoent;
844                                 }
845                         } else
846                                 err = -EIO;
847                         /* Failed to map the buffer, even after retrying. */
848                         bh->b_blocknr = -1;
849                         ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
850                                         "attribute type 0x%x, vcn 0x%llx, "
851                                         "vcn offset 0x%x, because its "
852                                         "location on disk could not be "
853                                         "determined%s (error code %i).",
854                                         ni->mft_no, ni->type,
855                                         (unsigned long long)bh_cpos,
856                                         (unsigned)bh_pos &
857                                         vol->cluster_size_mask,
858                                         is_retry ? " even after retrying" : "",
859                                         err);
860                         break;
861                 }
862 rl_not_mapped_enoent:
863                 /*
864                  * The buffer is in a hole or out of bounds.  We need to fill
865                  * the hole, unless the buffer is in a cluster which is not
866                  * touched by the write, in which case we just leave the buffer
867                  * unmapped.  This can only happen when the cluster size is
868                  * less than the page cache size.
869                  */
870                 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
871                         bh_cend = (bh_end + vol->cluster_size - 1) >>
872                                         vol->cluster_size_bits;
873                         if ((bh_cend <= cpos || bh_cpos >= cend)) {
874                                 bh->b_blocknr = -1;
875                                 /*
876                                  * If the buffer is uptodate we skip it.  If it
877                                  * is not but the page is uptodate, we can set
878                                  * the buffer uptodate.  If the page is not
879                                  * uptodate, we can clear the buffer and set it
880                                  * uptodate.  Whether this is worthwhile is
881                                  * debatable and this could be removed.
882                                  */
883                                 if (PageUptodate(page)) {
884                                         if (!buffer_uptodate(bh))
885                                                 set_buffer_uptodate(bh);
886                                 } else if (!buffer_uptodate(bh)) {
887                                         u8 *kaddr = kmap_atomic(page, KM_USER0);
888                                         memset(kaddr + bh_offset(bh), 0,
889                                                         blocksize);
890                                         kunmap_atomic(kaddr, KM_USER0);
891                                         flush_dcache_page(page);
892                                         set_buffer_uptodate(bh);
893                                 }
894                                 continue;
895                         }
896                 }
897                 /*
898                  * Out of bounds buffer is invalid if it was not really out of
899                  * bounds.
900                  */
901                 BUG_ON(lcn != LCN_HOLE);
902                 /*
903                  * We need the runlist locked for writing, so if it is locked
904                  * for reading relock it now and retry in case it changed
905                  * whilst we dropped the lock.
906                  */
907                 BUG_ON(!rl);
908                 if (!rl_write_locked) {
909                         up_read(&ni->runlist.lock);
910                         down_write(&ni->runlist.lock);
911                         rl_write_locked = TRUE;
912                         goto retry_remap;
913                 }
914                 /* Find the previous last allocated cluster. */
915                 BUG_ON(rl->lcn != LCN_HOLE);
916                 lcn = -1;
917                 rl2 = rl;
918                 while (--rl2 >= ni->runlist.rl) {
919                         if (rl2->lcn >= 0) {
920                                 lcn = rl2->lcn + rl2->length;
921                                 break;
922                         }
923                 }
924                 rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
925                                 FALSE);
926                 if (IS_ERR(rl2)) {
927                         err = PTR_ERR(rl2);
928                         ntfs_debug("Failed to allocate cluster, error code %i.",
929                                         err);
930                         break;
931                 }
932                 lcn = rl2->lcn;
933                 rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
934                 if (IS_ERR(rl)) {
935                         err = PTR_ERR(rl);
936                         if (err != -ENOMEM)
937                                 err = -EIO;
938                         if (ntfs_cluster_free_from_rl(vol, rl2)) {
939                                 ntfs_error(vol->sb, "Failed to release "
940                                                 "allocated cluster in error "
941                                                 "code path.  Run chkdsk to "
942                                                 "recover the lost cluster.");
943                                 NVolSetErrors(vol);
944                         }
945                         ntfs_free(rl2);
946                         break;
947                 }
948                 ni->runlist.rl = rl;
949                 status.runlist_merged = 1;
950                 ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn);
951                 /* Map and lock the mft record and get the attribute record. */
952                 if (!NInoAttr(ni))
953                         base_ni = ni;
954                 else
955                         base_ni = ni->ext.base_ntfs_ino;
956                 m = map_mft_record(base_ni);
957                 if (IS_ERR(m)) {
958                         err = PTR_ERR(m);
959                         break;
960                 }
961                 ctx = ntfs_attr_get_search_ctx(base_ni, m);
962                 if (unlikely(!ctx)) {
963                         err = -ENOMEM;
964                         unmap_mft_record(base_ni);
965                         break;
966                 }
967                 status.mft_attr_mapped = 1;
968                 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
969                                 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
970                 if (unlikely(err)) {
971                         if (err == -ENOENT)
972                                 err = -EIO;
973                         break;
974                 }
975                 m = ctx->mrec;
976                 a = ctx->attr;
977                 /*
978                  * Find the runlist element with which the attribute extent
979                  * starts.  Note, we cannot use the _attr_ version because we
980                  * have mapped the mft record.  That is ok because we know the
981                  * runlist fragment must be mapped already to have ever gotten
982                  * here, so we can just use the _rl_ version.
983                  */
984                 vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
985                 rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
986                 BUG_ON(!rl2);
987                 BUG_ON(!rl2->length);
988                 BUG_ON(rl2->lcn < LCN_HOLE);
989                 highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
990                 /*
991                  * If @highest_vcn is zero, calculate the real highest_vcn
992                  * (which can really be zero).
993                  */
994                 if (!highest_vcn)
995                         highest_vcn = (sle64_to_cpu(
996                                         a->data.non_resident.allocated_size) >>
997                                         vol->cluster_size_bits) - 1;
998                 /*
999                  * Determine the size of the mapping pairs array for the new
1000                  * extent, i.e. the old extent with the hole filled.
1001                  */
1002                 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
1003                                 highest_vcn);
1004                 if (unlikely(mp_size <= 0)) {
1005                         if (!(err = mp_size))
1006                                 err = -EIO;
1007                         ntfs_debug("Failed to get size for mapping pairs "
1008                                         "array, error code %i.", err);
1009                         break;
1010                 }
1011                 /*
1012                  * Resize the attribute record to fit the new mapping pairs
1013                  * array.
1014                  */
1015                 attr_rec_len = le32_to_cpu(a->length);
1016                 err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
1017                                 a->data.non_resident.mapping_pairs_offset));
1018                 if (unlikely(err)) {
1019                         BUG_ON(err != -ENOSPC);
1020                         // TODO: Deal with this by using the current attribute
1021                         // and fill it with as much of the mapping pairs
1022                         // array as possible.  Then loop over each attribute
1023                         // extent rewriting the mapping pairs arrays as we go
1024                         // along and if when we reach the end we have not
1025                         // enough space, try to resize the last attribute
1026                         // extent and if even that fails, add a new attribute
1027                         // extent.
1028                         // We could also try to resize at each step in the hope
1029                         // that we will not need to rewrite every single extent.
1030                         // Note, we may need to decompress some extents to fill
1031                         // the runlist as we are walking the extents...
1032                         ntfs_error(vol->sb, "Not enough space in the mft "
1033                                         "record for the extended attribute "
1034                                         "record.  This case is not "
1035                                         "implemented yet.");
1036                         err = -EOPNOTSUPP;
1037                         break ;
1038                 }
1039                 status.mp_rebuilt = 1;
1040                 /*
1041                  * Generate the mapping pairs array directly into the attribute
1042                  * record.
1043                  */
1044                 err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
1045                                 a->data.non_resident.mapping_pairs_offset),
1046                                 mp_size, rl2, vcn, highest_vcn, NULL);
1047                 if (unlikely(err)) {
1048                         ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
1049                                         "attribute type 0x%x, because building "
1050                                         "the mapping pairs failed with error "
1051                                         "code %i.", vi->i_ino,
1052                                         (unsigned)le32_to_cpu(ni->type), err);
1053                         err = -EIO;
1054                         break;
1055                 }
1056                 /* Update the highest_vcn but only if it was not set. */
1057                 if (unlikely(!a->data.non_resident.highest_vcn))
1058                         a->data.non_resident.highest_vcn =
1059                                         cpu_to_sle64(highest_vcn);
1060                 /*
1061                  * If the attribute is sparse/compressed, update the compressed
1062                  * size in the ntfs_inode structure and the attribute record.
1063                  */
1064                 if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
1065                         /*
1066                          * If we are not in the first attribute extent, switch
1067                          * to it, but first ensure the changes will make it to
1068                          * disk later.
1069                          */
1070                         if (a->data.non_resident.lowest_vcn) {
1071                                 flush_dcache_mft_record_page(ctx->ntfs_ino);
1072                                 mark_mft_record_dirty(ctx->ntfs_ino);
1073                                 ntfs_attr_reinit_search_ctx(ctx);
1074                                 err = ntfs_attr_lookup(ni->type, ni->name,
1075                                                 ni->name_len, CASE_SENSITIVE,
1076                                                 0, NULL, 0, ctx);
1077                                 if (unlikely(err)) {
1078                                         status.attr_switched = 1;
1079                                         break;
1080                                 }
1081                                 /* @m is not used any more so do not set it. */
1082                                 a = ctx->attr;
1083                         }
1084                         write_lock_irqsave(&ni->size_lock, flags);
1085                         ni->itype.compressed.size += vol->cluster_size;
1086                         a->data.non_resident.compressed_size =
1087                                         cpu_to_sle64(ni->itype.compressed.size);
1088                         write_unlock_irqrestore(&ni->size_lock, flags);
1089                 }
1090                 /* Ensure the changes make it to disk. */
1091                 flush_dcache_mft_record_page(ctx->ntfs_ino);
1092                 mark_mft_record_dirty(ctx->ntfs_ino);
1093                 ntfs_attr_put_search_ctx(ctx);
1094                 unmap_mft_record(base_ni);
1095                 /* Successfully filled the hole. */
1096                 status.runlist_merged = 0;
1097                 status.mft_attr_mapped = 0;
1098                 status.mp_rebuilt = 0;
1099                 /* Setup the map cache and use that to deal with the buffer. */
1100                 was_hole = TRUE;
1101                 vcn = bh_cpos;
1102                 vcn_len = 1;
1103                 lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
1104                 cdelta = 0;
1105                 /*
                 * If the number of remaining clusters in the @pages is less
                 * than or equal to the number of cached clusters, unlock the
1108                  * runlist as the map cache will be used from now on.
1109                  */
1110                 if (likely(vcn + vcn_len >= cend)) {
1111                         up_write(&ni->runlist.lock);
1112                         rl_write_locked = FALSE;
1113                         rl = NULL;
1114                 }
1115                 goto map_buffer_cached;
1116         } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
1117         /* If there are no errors, do the next page. */
1118         if (likely(!err && ++u < nr_pages))
1119                 goto do_next_page;
1120         /* If there are no errors, release the runlist lock if we took it. */
1121         if (likely(!err)) {
1122                 if (unlikely(rl_write_locked)) {
1123                         up_write(&ni->runlist.lock);
1124                         rl_write_locked = FALSE;
1125                 } else if (unlikely(rl))
1126                         up_read(&ni->runlist.lock);
1127                 rl = NULL;
1128         }
1129         /* If we issued read requests, let them complete. */
1130         read_lock_irqsave(&ni->size_lock, flags);
1131         initialized_size = ni->initialized_size;
1132         read_unlock_irqrestore(&ni->size_lock, flags);
1133         while (wait_bh > wait) {
1134                 bh = *--wait_bh;
1135                 wait_on_buffer(bh);
1136                 if (likely(buffer_uptodate(bh))) {
1137                         page = bh->b_page;
1138                         bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
1139                                         bh_offset(bh);
1140                         /*
                         * If the buffer overflows the initialized size, we
                         * need to zero the overflowing region.
1143                          */
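                        /*
                         * For example, with a 512-byte block size, if this
                         * buffer starts at file byte 1000 and the
                         * initialized size is 1100, ofs becomes 100 and
                         * bytes 100 to 511 of the buffer are zeroed below.
                         */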
1144                         if (unlikely(bh_pos + blocksize > initialized_size)) {
1145                                 u8 *kaddr;
1146                                 int ofs = 0;
1147
1148                                 if (likely(bh_pos < initialized_size))
1149                                         ofs = initialized_size - bh_pos;
1150                                 kaddr = kmap_atomic(page, KM_USER0);
1151                                 memset(kaddr + bh_offset(bh) + ofs, 0,
1152                                                 blocksize - ofs);
1153                                 kunmap_atomic(kaddr, KM_USER0);
1154                                 flush_dcache_page(page);
1155                         }
1156                 } else /* if (unlikely(!buffer_uptodate(bh))) */
1157                         err = -EIO;
1158         }
1159         if (likely(!err)) {
1160                 /* Clear buffer_new on all buffers. */
1161                 u = 0;
1162                 do {
1163                         bh = head = page_buffers(pages[u]);
1164                         do {
1165                                 if (buffer_new(bh))
1166                                         clear_buffer_new(bh);
1167                         } while ((bh = bh->b_this_page) != head);
1168                 } while (++u < nr_pages);
1169                 ntfs_debug("Done.");
1170                 return err;
1171         }
1172         if (status.attr_switched) {
1173                 /* Get back to the attribute extent we modified. */
1174                 ntfs_attr_reinit_search_ctx(ctx);
1175                 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1176                                 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
1177                         ntfs_error(vol->sb, "Failed to find required "
1178                                         "attribute extent of attribute in "
1179                                         "error code path.  Run chkdsk to "
1180                                         "recover.");
1181                         write_lock_irqsave(&ni->size_lock, flags);
1182                         ni->itype.compressed.size += vol->cluster_size;
1183                         write_unlock_irqrestore(&ni->size_lock, flags);
1184                         flush_dcache_mft_record_page(ctx->ntfs_ino);
1185                         mark_mft_record_dirty(ctx->ntfs_ino);
1186                         /*
1187                          * The only thing that is now wrong is the compressed
1188                          * size of the base attribute extent which chkdsk
1189                          * should be able to fix.
1190                          */
1191                         NVolSetErrors(vol);
1192                 } else {
1193                         m = ctx->mrec;
1194                         a = ctx->attr;
1195                         status.attr_switched = 0;
1196                 }
1197         }
1198         /*
         * If the runlist has been modified, we need to restore it by punching
         * a hole into it, and we then need to deallocate the on-disk cluster
         * as well.  Note, we only modify the runlist if we are able to generate a
1202          * new mapping pairs array, i.e. only when the mapped attribute extent
1203          * is not switched.
1204          */
1205         if (status.runlist_merged && !status.attr_switched) {
1206                 BUG_ON(!rl_write_locked);
1207                 /* Make the file cluster we allocated sparse in the runlist. */
1208                 if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
1209                         ntfs_error(vol->sb, "Failed to punch hole into "
1210                                         "attribute runlist in error code "
1211                                         "path.  Run chkdsk to recover the "
1212                                         "lost cluster.");
1213                         make_bad_inode(vi);
1214                         make_bad_inode(VFS_I(base_ni));
1215                         NVolSetErrors(vol);
1216                 } else /* if (success) */ {
1217                         status.runlist_merged = 0;
1218                         /*
1219                          * Deallocate the on-disk cluster we allocated but only
1220                          * if we succeeded in punching its vcn out of the
1221                          * runlist.
1222                          */
1223                         down_write(&vol->lcnbmp_lock);
1224                         if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
1225                                 ntfs_error(vol->sb, "Failed to release "
1226                                                 "allocated cluster in error "
1227                                                 "code path.  Run chkdsk to "
1228                                                 "recover the lost cluster.");
1229                                 NVolSetErrors(vol);
1230                         }
1231                         up_write(&vol->lcnbmp_lock);
1232                 }
1233         }
1234         /*
1235          * Resize the attribute record to its old size and rebuild the mapping
         * pairs array.  Note, we can only do this if the runlist has been
         * restored to its old state, which also implies that the mapped
1238          * attribute extent is not switched.
1239          */
1240         if (status.mp_rebuilt && !status.runlist_merged) {
1241                 if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
1242                         ntfs_error(vol->sb, "Failed to restore attribute "
1243                                         "record in error code path.  Run "
1244                                         "chkdsk to recover.");
1245                         make_bad_inode(vi);
1246                         make_bad_inode(VFS_I(base_ni));
1247                         NVolSetErrors(vol);
1248                 } else /* if (success) */ {
1249                         if (ntfs_mapping_pairs_build(vol, (u8*)a +
1250                                         le16_to_cpu(a->data.non_resident.
1251                                         mapping_pairs_offset), attr_rec_len -
1252                                         le16_to_cpu(a->data.non_resident.
1253                                         mapping_pairs_offset), ni->runlist.rl,
1254                                         vcn, highest_vcn, NULL)) {
1255                                 ntfs_error(vol->sb, "Failed to restore "
1256                                                 "mapping pairs array in error "
1257                                                 "code path.  Run chkdsk to "
1258                                                 "recover.");
1259                                 make_bad_inode(vi);
1260                                 make_bad_inode(VFS_I(base_ni));
1261                                 NVolSetErrors(vol);
1262                         }
1263                         flush_dcache_mft_record_page(ctx->ntfs_ino);
1264                         mark_mft_record_dirty(ctx->ntfs_ino);
1265                 }
1266         }
1267         /* Release the mft record and the attribute. */
1268         if (status.mft_attr_mapped) {
1269                 ntfs_attr_put_search_ctx(ctx);
1270                 unmap_mft_record(base_ni);
1271         }
1272         /* Release the runlist lock. */
1273         if (rl_write_locked)
1274                 up_write(&ni->runlist.lock);
1275         else if (rl)
1276                 up_read(&ni->runlist.lock);
1277         /*
1278          * Zero out any newly allocated blocks to avoid exposing stale data.
1279          * If BH_New is set, we know that the block was newly allocated above
1280          * and that it has not been fully zeroed and marked dirty yet.
1281          */
1282         nr_pages = u;
1283         u = 0;
1284         end = bh_cpos << vol->cluster_size_bits;
1285         do {
1286                 page = pages[u];
1287                 bh = head = page_buffers(page);
1288                 do {
1289                         if (u == nr_pages &&
1290                                         ((s64)page->index << PAGE_CACHE_SHIFT) +
1291                                         bh_offset(bh) >= end)
1292                                 break;
1293                         if (!buffer_new(bh))
1294                                 continue;
1295                         clear_buffer_new(bh);
1296                         if (!buffer_uptodate(bh)) {
1297                                 if (PageUptodate(page))
1298                                         set_buffer_uptodate(bh);
1299                                 else {
1300                                         u8 *kaddr = kmap_atomic(page, KM_USER0);
1301                                         memset(kaddr + bh_offset(bh), 0,
1302                                                         blocksize);
1303                                         kunmap_atomic(kaddr, KM_USER0);
1304                                         flush_dcache_page(page);
1305                                         set_buffer_uptodate(bh);
1306                                 }
1307                         }
1308                         mark_buffer_dirty(bh);
1309                 } while ((bh = bh->b_this_page) != head);
1310         } while (++u <= nr_pages);
1311         ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
1312         return err;
1313 }
1314
1315 /*
1316  * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
1318  * out to (ofs + bytes) and return the number of bytes which were copied.
1319  */
1320 static inline size_t ntfs_copy_from_user(struct page **pages,
1321                 unsigned nr_pages, unsigned ofs, const char __user *buf,
1322                 size_t bytes)
1323 {
1324         struct page **last_page = pages + nr_pages;
1325         char *kaddr;
1326         size_t total = 0;
1327         unsigned len;
1328         int left;
1329
1330         do {
1331                 len = PAGE_CACHE_SIZE - ofs;
1332                 if (len > bytes)
1333                         len = bytes;
1334                 kaddr = kmap_atomic(*pages, KM_USER0);
1335                 left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
1336                 kunmap_atomic(kaddr, KM_USER0);
1337                 if (unlikely(left)) {
1338                         /* Do it the slow way. */
1339                         kaddr = kmap(*pages);
1340                         left = __copy_from_user(kaddr + ofs, buf, len);
1341                         kunmap(*pages);
1342                         if (unlikely(left))
1343                                 goto err_out;
1344                 }
1345                 total += len;
1346                 bytes -= len;
1347                 if (!bytes)
1348                         break;
1349                 buf += len;
1350                 ofs = 0;
1351         } while (++pages < last_page);
1352 out:
1353         return total;
1354 err_out:
1355         total += len - left;
1356         /* Zero the rest of the target like __copy_from_user(). */
1357         while (++pages < last_page) {
1358                 bytes -= len;
1359                 if (!bytes)
1360                         break;
1361                 len = PAGE_CACHE_SIZE;
1362                 if (len > bytes)
1363                         len = bytes;
1364                 kaddr = kmap_atomic(*pages, KM_USER0);
1365                 memset(kaddr, 0, len);
1366                 kunmap_atomic(kaddr, KM_USER0);
1367         }
1368         goto out;
1369 }
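
/*
 * Illustrative note: a caller detects a fault from the shortened return
 * value, e.g. ntfs_file_buffered_write() below does (in essence):
 *
 *	copied = ntfs_copy_from_user(pages + u, do_pages - u, ofs, buf, bytes);
 *	if (unlikely(copied != bytes))
 *		status = -EFAULT;
 *
 * Because the pages have been cleared out to (ofs + bytes) on a fault, no
 * stale data is exposed when they are subsequently marked uptodate.
 */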
1370
1371 static size_t __ntfs_copy_from_user_iovec(char *vaddr,
1372                 const struct iovec *iov, size_t iov_ofs, size_t bytes)
1373 {
1374         size_t total = 0;
1375
1376         while (1) {
1377                 const char __user *buf = iov->iov_base + iov_ofs;
1378                 unsigned len;
1379                 size_t left;
1380
1381                 len = iov->iov_len - iov_ofs;
1382                 if (len > bytes)
1383                         len = bytes;
1384                 left = __copy_from_user_inatomic(vaddr, buf, len);
1385                 total += len;
1386                 bytes -= len;
1387                 vaddr += len;
1388                 if (unlikely(left)) {
1389                         /*
1390                          * Zero the rest of the target like __copy_from_user().
1391                          */
1392                         memset(vaddr, 0, bytes);
1393                         total -= left;
1394                         break;
1395                 }
1396                 if (!bytes)
1397                         break;
1398                 iov++;
1399                 iov_ofs = 0;
1400         }
1401         return total;
1402 }
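
/*
 * Worked example: with two segments of 3 and 5 bytes and a zero @iov_ofs, a
 * request for 6 bytes consumes all of the first segment plus the first 3
 * bytes of the second and returns 6.  On a fault, the remainder of the
 * destination is zeroed and the number of bytes actually copied is returned.
 */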
1403
1404 static inline void ntfs_set_next_iovec(const struct iovec **iovp,
1405                 size_t *iov_ofsp, size_t bytes)
1406 {
1407         const struct iovec *iov = *iovp;
1408         size_t iov_ofs = *iov_ofsp;
1409
1410         while (bytes) {
1411                 unsigned len;
1412
1413                 len = iov->iov_len - iov_ofs;
1414                 if (len > bytes)
1415                         len = bytes;
1416                 bytes -= len;
1417                 iov_ofs += len;
1418                 if (iov->iov_len == iov_ofs) {
1419                         iov++;
1420                         iov_ofs = 0;
1421                 }
1422         }
1423         *iovp = iov;
1424         *iov_ofsp = iov_ofs;
1425 }
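
/*
 * Worked example: with iov = { { base0, 3 }, { base1, 5 } } and
 * *iov_ofsp == 2, advancing by 4 bytes consumes the last byte of the first
 * segment and 3 bytes of the second, leaving *iovp pointing at the second
 * segment with *iov_ofsp == 3.
 */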
1426
1427 /*
1428  * This has the same side-effects and return value as ntfs_copy_from_user().
1429  * The difference is that on a fault we need to memset the remainder of the
1430  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
1431  * single-segment behaviour.
1432  *
1433  * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
1434  * when not atomic.  This is ok because __ntfs_copy_from_user_iovec() calls
1435  * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
1436  * fact, the only difference between __copy_from_user_inatomic() and
1437  * __copy_from_user() is that the latter calls might_sleep().  And on many
1438  * architectures __copy_from_user_inatomic() is just defined to
1439  * __copy_from_user() so it makes no difference at all on those architectures.
1440  */
1441 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1442                 unsigned nr_pages, unsigned ofs, const struct iovec **iov,
1443                 size_t *iov_ofs, size_t bytes)
1444 {
1445         struct page **last_page = pages + nr_pages;
1446         char *kaddr;
1447         size_t copied, len, total = 0;
1448
1449         do {
1450                 len = PAGE_CACHE_SIZE - ofs;
1451                 if (len > bytes)
1452                         len = bytes;
1453                 kaddr = kmap_atomic(*pages, KM_USER0);
1454                 copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
1455                                 *iov, *iov_ofs, len);
1456                 kunmap_atomic(kaddr, KM_USER0);
1457                 if (unlikely(copied != len)) {
1458                         /* Do it the slow way. */
1459                         kaddr = kmap(*pages);
1460                         copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
1461                                         *iov, *iov_ofs, len);
1462                         kunmap(*pages);
1463                         if (unlikely(copied != len))
1464                                 goto err_out;
1465                 }
                total += len;
                /*
                 * Advance the iovec position for every chunk, including the
                 * final one, so that the caller continues from the correct
                 * position in the iovec on a subsequent call.
                 */
                ntfs_set_next_iovec(iov, iov_ofs, len);
                bytes -= len;
                if (!bytes)
                        break;
                ofs = 0;
1472         } while (++pages < last_page);
1473 out:
1474         return total;
1475 err_out:
1476         total += copied;
1477         /* Zero the rest of the target like __copy_from_user(). */
1478         while (++pages < last_page) {
1479                 bytes -= len;
1480                 if (!bytes)
1481                         break;
1482                 len = PAGE_CACHE_SIZE;
1483                 if (len > bytes)
1484                         len = bytes;
1485                 kaddr = kmap_atomic(*pages, KM_USER0);
1486                 memset(kaddr, 0, len);
1487                 kunmap_atomic(kaddr, KM_USER0);
1488         }
1489         goto out;
1490 }
1491
static inline void ntfs_flush_dcache_pages(struct page **pages,
                unsigned nr_pages)
{
        BUG_ON(!nr_pages);
        /*
         * Warning: Do not do the decrement as part of the call to
         * flush_dcache_page() because it is a no-op macro on i386 and the
         * decrement would then never happen, so the loop would never
         * terminate.  Decrementing before the call also keeps the index in
         * bounds and ensures pages[0] gets flushed.
         */
        do {
                --nr_pages;
                flush_dcache_page(pages[nr_pages]);
        } while (nr_pages > 0);
}
1505
1506 /**
1507  * ntfs_commit_pages_after_non_resident_write - commit the received data
1508  * @pages:      array of destination pages
1509  * @nr_pages:   number of pages in @pages
1510  * @pos:        byte position in file at which the write begins
1511  * @bytes:      number of bytes to be written
1512  *
1513  * See description of ntfs_commit_pages_after_write(), below.
1514  */
1515 static inline int ntfs_commit_pages_after_non_resident_write(
1516                 struct page **pages, const unsigned nr_pages,
1517                 s64 pos, size_t bytes)
1518 {
1519         s64 end, initialized_size;
1520         struct inode *vi;
1521         ntfs_inode *ni, *base_ni;
1522         struct buffer_head *bh, *head;
1523         ntfs_attr_search_ctx *ctx;
1524         MFT_RECORD *m;
1525         ATTR_RECORD *a;
1526         unsigned long flags;
1527         unsigned blocksize, u;
1528         int err;
1529
1530         vi = pages[0]->mapping->host;
1531         ni = NTFS_I(vi);
1532         blocksize = 1 << vi->i_blkbits;
1533         end = pos + bytes;
1534         u = 0;
1535         do {
1536                 s64 bh_pos;
1537                 struct page *page;
1538                 BOOL partial;
1539
1540                 page = pages[u];
1541                 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
1542                 bh = head = page_buffers(page);
1543                 partial = FALSE;
1544                 do {
1545                         s64 bh_end;
1546
1547                         bh_end = bh_pos + blocksize;
1548                         if (bh_end <= pos || bh_pos >= end) {
1549                                 if (!buffer_uptodate(bh))
1550                                         partial = TRUE;
1551                         } else {
1552                                 set_buffer_uptodate(bh);
1553                                 mark_buffer_dirty(bh);
1554                         }
1555                 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
1556                 /*
1557                  * If all buffers are now uptodate but the page is not, set the
1558                  * page uptodate.
1559                  */
1560                 if (!partial && !PageUptodate(page))
1561                         SetPageUptodate(page);
1562         } while (++u < nr_pages);
1563         /*
1564          * Finally, if we do not need to update initialized_size or i_size we
1565          * are finished.
1566          */
1567         read_lock_irqsave(&ni->size_lock, flags);
1568         initialized_size = ni->initialized_size;
1569         read_unlock_irqrestore(&ni->size_lock, flags);
1570         if (end <= initialized_size) {
1571                 ntfs_debug("Done.");
1572                 return 0;
1573         }
1574         /*
1575          * Update initialized_size/i_size as appropriate, both in the inode and
1576          * the mft record.
1577          */
1578         if (!NInoAttr(ni))
1579                 base_ni = ni;
1580         else
1581                 base_ni = ni->ext.base_ntfs_ino;
1582         /* Map, pin, and lock the mft record. */
1583         m = map_mft_record(base_ni);
1584         if (IS_ERR(m)) {
1585                 err = PTR_ERR(m);
1586                 m = NULL;
1587                 ctx = NULL;
1588                 goto err_out;
1589         }
1590         BUG_ON(!NInoNonResident(ni));
1591         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1592         if (unlikely(!ctx)) {
1593                 err = -ENOMEM;
1594                 goto err_out;
1595         }
1596         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1597                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1598         if (unlikely(err)) {
1599                 if (err == -ENOENT)
1600                         err = -EIO;
1601                 goto err_out;
1602         }
1603         a = ctx->attr;
1604         BUG_ON(!a->non_resident);
1605         write_lock_irqsave(&ni->size_lock, flags);
1606         BUG_ON(end > ni->allocated_size);
1607         ni->initialized_size = end;
1608         a->data.non_resident.initialized_size = cpu_to_sle64(end);
1609         if (end > i_size_read(vi)) {
1610                 i_size_write(vi, end);
1611                 a->data.non_resident.data_size =
1612                                 a->data.non_resident.initialized_size;
1613         }
1614         write_unlock_irqrestore(&ni->size_lock, flags);
1615         /* Mark the mft record dirty, so it gets written back. */
1616         flush_dcache_mft_record_page(ctx->ntfs_ino);
1617         mark_mft_record_dirty(ctx->ntfs_ino);
1618         ntfs_attr_put_search_ctx(ctx);
1619         unmap_mft_record(base_ni);
1620         ntfs_debug("Done.");
1621         return 0;
1622 err_out:
1623         if (ctx)
1624                 ntfs_attr_put_search_ctx(ctx);
1625         if (m)
1626                 unmap_mft_record(base_ni);
1627         ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
1628                         "code %i).", err);
1629         if (err != -ENOMEM) {
1630                 NVolSetErrors(ni->vol);
1631                 make_bad_inode(VFS_I(base_ni));
1632                 make_bad_inode(vi);
1633         }
1634         return err;
1635 }
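
/*
 * Worked example for the above: if the old initialized_size equals the old
 * i_size and the write ends 100 bytes beyond it, initialized_size is set to
 * the new end and i_size and the on-disk data_size are advanced to match.
 */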
1636
1637 /**
1638  * ntfs_commit_pages_after_write - commit the received data
1639  * @pages:      array of destination pages
1640  * @nr_pages:   number of pages in @pages
1641  * @pos:        byte position in file at which the write begins
1642  * @bytes:      number of bytes to be written
1643  *
1644  * This is called from ntfs_file_buffered_write() with i_sem held on the inode
1645  * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
 * locked but not kmap()ped.  The source data has already been copied into
 * @pages.  ntfs_prepare_pages_for_non_resident_write() has been called before
1648  * the data was copied (for non-resident attributes only) and it returned
1649  * success.
1650  *
1651  * Need to set uptodate and mark dirty all buffers within the boundary of the
1652  * write.  If all buffers in a page are uptodate we set the page uptodate, too.
1653  *
1654  * Setting the buffers dirty ensures that they get written out later when
1655  * ntfs_writepage() is invoked by the VM.
1656  *
1657  * Finally, we need to update i_size and initialized_size as appropriate both
1658  * in the inode and the mft record.
1659  *
1660  * This is modelled after fs/buffer.c::generic_commit_write(), which marks
1661  * buffers uptodate and dirty, sets the page uptodate if all buffers in the
1662  * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
1663  * that case, it also marks the inode dirty.
1664  *
1665  * If things have gone as outlined in
1666  * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
1667  * content modifications here for non-resident attributes.  For resident
 * attributes we need to bring the page uptodate here, which we combine with
 * the copying into the mft record, thus saving one atomic kmap.
1670  *
1671  * Return 0 on success or -errno on error.
1672  */
1673 static int ntfs_commit_pages_after_write(struct page **pages,
1674                 const unsigned nr_pages, s64 pos, size_t bytes)
1675 {
1676         s64 end, initialized_size;
1677         loff_t i_size;
1678         struct inode *vi;
1679         ntfs_inode *ni, *base_ni;
1680         struct page *page;
1681         ntfs_attr_search_ctx *ctx;
1682         MFT_RECORD *m;
1683         ATTR_RECORD *a;
1684         char *kattr, *kaddr;
1685         unsigned long flags;
1686         u32 attr_len;
1687         int err;
1688
1689         BUG_ON(!nr_pages);
1690         BUG_ON(!pages);
1691         page = pages[0];
1692         BUG_ON(!page);
1693         vi = page->mapping->host;
1694         ni = NTFS_I(vi);
1695         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
1696                         "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%x.",
1697                         vi->i_ino, ni->type, page->index, nr_pages,
1698                         (long long)pos, bytes);
1699         if (NInoNonResident(ni))
1700                 return ntfs_commit_pages_after_non_resident_write(pages,
1701                                 nr_pages, pos, bytes);
1702         BUG_ON(nr_pages > 1);
1703         /*
1704          * Attribute is resident, implying it is not compressed, encrypted, or
1705          * sparse.
1706          */
1707         if (!NInoAttr(ni))
1708                 base_ni = ni;
1709         else
1710                 base_ni = ni->ext.base_ntfs_ino;
1711         BUG_ON(NInoNonResident(ni));
1712         /* Map, pin, and lock the mft record. */
1713         m = map_mft_record(base_ni);
1714         if (IS_ERR(m)) {
1715                 err = PTR_ERR(m);
1716                 m = NULL;
1717                 ctx = NULL;
1718                 goto err_out;
1719         }
1720         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1721         if (unlikely(!ctx)) {
1722                 err = -ENOMEM;
1723                 goto err_out;
1724         }
1725         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1726                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1727         if (unlikely(err)) {
1728                 if (err == -ENOENT)
1729                         err = -EIO;
1730                 goto err_out;
1731         }
1732         a = ctx->attr;
1733         BUG_ON(a->non_resident);
1734         /* The total length of the attribute value. */
1735         attr_len = le32_to_cpu(a->data.resident.value_length);
1736         i_size = i_size_read(vi);
1737         BUG_ON(attr_len != i_size);
1738         BUG_ON(pos > attr_len);
1739         end = pos + bytes;
1740         BUG_ON(end > le32_to_cpu(a->length) -
1741                         le16_to_cpu(a->data.resident.value_offset));
1742         kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
1743         kaddr = kmap_atomic(page, KM_USER0);
1744         /* Copy the received data from the page to the mft record. */
1745         memcpy(kattr + pos, kaddr + pos, bytes);
1746         /* Update the attribute length if necessary. */
1747         if (end > attr_len) {
1748                 attr_len = end;
1749                 a->data.resident.value_length = cpu_to_le32(attr_len);
1750         }
1751         /*
         * If the page is not uptodate, bring the out-of-bounds area(s)
1753          * uptodate by copying data from the mft record to the page.
1754          */
1755         if (!PageUptodate(page)) {
1756                 if (pos > 0)
1757                         memcpy(kaddr, kattr, pos);
1758                 if (end < attr_len)
1759                         memcpy(kaddr + end, kattr + end, attr_len - end);
1760                 /* Zero the region outside the end of the attribute value. */
1761                 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1762                 flush_dcache_page(page);
1763                 SetPageUptodate(page);
1764         }
1765         kunmap_atomic(kaddr, KM_USER0);
1766         /* Update initialized_size/i_size if necessary. */
1767         read_lock_irqsave(&ni->size_lock, flags);
1768         initialized_size = ni->initialized_size;
1769         BUG_ON(end > ni->allocated_size);
1770         read_unlock_irqrestore(&ni->size_lock, flags);
1771         BUG_ON(initialized_size != i_size);
1772         if (end > initialized_size) {
1773                 unsigned long flags;
1774
1775                 write_lock_irqsave(&ni->size_lock, flags);
1776                 ni->initialized_size = end;
1777                 i_size_write(vi, end);
1778                 write_unlock_irqrestore(&ni->size_lock, flags);
1779         }
1780         /* Mark the mft record dirty, so it gets written back. */
1781         flush_dcache_mft_record_page(ctx->ntfs_ino);
1782         mark_mft_record_dirty(ctx->ntfs_ino);
1783         ntfs_attr_put_search_ctx(ctx);
1784         unmap_mft_record(base_ni);
1785         ntfs_debug("Done.");
1786         return 0;
1787 err_out:
1788         if (err == -ENOMEM) {
1789                 ntfs_warning(vi->i_sb, "Error allocating memory required to "
1790                                 "commit the write.");
1791                 if (PageUptodate(page)) {
1792                         ntfs_warning(vi->i_sb, "Page is uptodate, setting "
1793                                         "dirty so the write will be retried "
1794                                         "later on by the VM.");
1795                         /*
1796                          * Put the page on mapping->dirty_pages, but leave its
1797                          * buffers' dirty state as-is.
1798                          */
1799                         __set_page_dirty_nobuffers(page);
1800                         err = 0;
1801                 } else
1802                         ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
1803                                         "data has been lost.");
1804         } else {
1805                 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
1806                                 "with error %i.", err);
1807                 NVolSetErrors(ni->vol);
1808                 make_bad_inode(VFS_I(base_ni));
1809                 make_bad_inode(vi);
1810         }
1811         if (ctx)
1812                 ntfs_attr_put_search_ctx(ctx);
1813         if (m)
1814                 unmap_mft_record(base_ni);
1815         return err;
1816 }
1817
1818 /**
 * ntfs_file_buffered_write - write data to a file via the page cache
1820  *
1821  * Locking: The vfs is holding ->i_sem on the inode.
1822  */
1823 static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
1824                 const struct iovec *iov, unsigned long nr_segs,
1825                 loff_t pos, loff_t *ppos, size_t count)
1826 {
1827         struct file *file = iocb->ki_filp;
1828         struct address_space *mapping = file->f_mapping;
1829         struct inode *vi = mapping->host;
1830         ntfs_inode *ni = NTFS_I(vi);
1831         ntfs_volume *vol = ni->vol;
1832         struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
1833         struct page *cached_page = NULL;
1834         char __user *buf = NULL;
1835         s64 end, ll;
1836         VCN last_vcn;
1837         LCN lcn;
1838         unsigned long flags;
1839         size_t bytes, iov_ofs;
1840         ssize_t status, written;
1841         unsigned nr_pages;
1842         int err;
1843         struct pagevec lru_pvec;
1844
1845         ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1846                         "pos 0x%llx, count 0x%lx.",
1847                         vi->i_ino, (unsigned)le32_to_cpu(ni->type),
1848                         (unsigned long long)pos, (unsigned long)count);
1849         if (unlikely(!count))
1850                 return 0;
1851         BUG_ON(NInoMstProtected(ni));
1852         /*
1853          * If the attribute is not an index root and it is encrypted or
1854          * compressed, we cannot write to it yet.  Note we need to check for
1855          * AT_INDEX_ALLOCATION since this is the type of both directory and
1856          * index inodes.
1857          */
1858         if (ni->type != AT_INDEX_ALLOCATION) {
1859                 /* If file is encrypted, deny access, just like NT4. */
1860                 if (NInoEncrypted(ni)) {
1861                         /*
1862                          * Reminder for later: Encrypted files are _always_
1863                          * non-resident so that the content can always be
1864                          * encrypted.
1865                          */
1866                         ntfs_debug("Denying write access to encrypted file.");
1867                         return -EACCES;
1868                 }
1869                 if (NInoCompressed(ni)) {
1870                         /* Only unnamed $DATA attribute can be compressed. */
1871                         BUG_ON(ni->type != AT_DATA);
1872                         BUG_ON(ni->name_len);
1873                         /*
1874                          * Reminder for later: If resident, the data is not
1875                          * actually compressed.  Only on the switch to non-
1876                          * resident does compression kick in.  This is in
1877                          * contrast to encrypted files (see above).
1878                          */
1879                         ntfs_error(vi->i_sb, "Writing to compressed files is "
1880                                         "not implemented yet.  Sorry.");
1881                         return -EOPNOTSUPP;
1882                 }
1883         }
1884         /*
1885          * If a previous ntfs_truncate() failed, repeat it and abort if it
1886          * fails again.
1887          */
1888         if (unlikely(NInoTruncateFailed(ni))) {
1889                 down_write(&vi->i_alloc_sem);
1890                 err = ntfs_truncate(vi);
1891                 up_write(&vi->i_alloc_sem);
1892                 if (err || NInoTruncateFailed(ni)) {
1893                         if (!err)
1894                                 err = -EIO;
1895                         ntfs_error(vol->sb, "Cannot perform write to inode "
1896                                         "0x%lx, attribute type 0x%x, because "
1897                                         "ntfs_truncate() failed (error code "
1898                                         "%i).", vi->i_ino,
1899                                         (unsigned)le32_to_cpu(ni->type), err);
1900                         return err;
1901                 }
1902         }
1903         /* The first byte after the write. */
1904         end = pos + count;
1905         /*
1906          * If the write goes beyond the allocated size, extend the allocation
1907          * to cover the whole of the write, rounded up to the nearest cluster.
1908          */
1909         read_lock_irqsave(&ni->size_lock, flags);
1910         ll = ni->allocated_size;
1911         read_unlock_irqrestore(&ni->size_lock, flags);
1912         if (end > ll) {
1913                 /* Extend the allocation without changing the data size. */
1914                 ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
1915                 if (likely(ll >= 0)) {
1916                         BUG_ON(pos >= ll);
                        /* If the extension was partial, truncate the write. */
1918                         if (end > ll) {
1919                                 ntfs_debug("Truncating write to inode 0x%lx, "
1920                                                 "attribute type 0x%x, because "
1921                                                 "the allocation was only "
1922                                                 "partially extended.",
1923                                                 vi->i_ino, (unsigned)
1924                                                 le32_to_cpu(ni->type));
1925                                 end = ll;
1926                                 count = ll - pos;
1927                         }
1928                 } else {
1929                         err = ll;
1930                         read_lock_irqsave(&ni->size_lock, flags);
1931                         ll = ni->allocated_size;
1932                         read_unlock_irqrestore(&ni->size_lock, flags);
1933                         /* Perform a partial write if possible or fail. */
1934                         if (pos < ll) {
1935                                 ntfs_debug("Truncating write to inode 0x%lx, "
1936                                                 "attribute type 0x%x, because "
1937                                                 "extending the allocation "
1938                                                 "failed (error code %i).",
1939                                                 vi->i_ino, (unsigned)
1940                                                 le32_to_cpu(ni->type), err);
1941                                 end = ll;
1942                                 count = ll - pos;
1943                         } else {
1944                                 ntfs_error(vol->sb, "Cannot perform write to "
1945                                                 "inode 0x%lx, attribute type "
1946                                                 "0x%x, because extending the "
1947                                                 "allocation failed (error "
1948                                                 "code %i).", vi->i_ino,
1949                                                 (unsigned)
1950                                                 le32_to_cpu(ni->type), err);
1951                                 return err;
1952                         }
1953                 }
1954         }
1955         pagevec_init(&lru_pvec, 0);
1956         written = 0;
1957         /*
1958          * If the write starts beyond the initialized size, extend it up to the
1959          * beginning of the write and initialize all non-sparse space between
1960          * the old initialized size and the new one.  This automatically also
1961          * increments the vfs inode->i_size to keep it above or equal to the
1962          * initialized_size.
1963          */
1964         read_lock_irqsave(&ni->size_lock, flags);
1965         ll = ni->initialized_size;
1966         read_unlock_irqrestore(&ni->size_lock, flags);
1967         if (pos > ll) {
1968                 err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
1969                                 &lru_pvec);
1970                 if (err < 0) {
1971                         ntfs_error(vol->sb, "Cannot perform write to inode "
1972                                         "0x%lx, attribute type 0x%x, because "
1973                                         "extending the initialized size "
1974                                         "failed (error code %i).", vi->i_ino,
1975                                         (unsigned)le32_to_cpu(ni->type), err);
1976                         status = err;
1977                         goto err_out;
1978                 }
1979         }
1980         /*
1981          * Determine the number of pages per cluster for non-resident
1982          * attributes.
1983          */
1984         nr_pages = 1;
1985         if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
1986                 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
1987         /* Finally, perform the actual write. */
1988         last_vcn = -1;
1989         if (likely(nr_segs == 1))
1990                 buf = iov->iov_base;
1991         else
1992                 iov_ofs = 0;    /* Offset in the current iovec. */
1993         do {
1994                 VCN vcn;
1995                 pgoff_t idx, start_idx;
1996                 unsigned ofs, do_pages, u;
1997                 size_t copied;
1998
1999                 start_idx = idx = pos >> PAGE_CACHE_SHIFT;
2000                 ofs = pos & ~PAGE_CACHE_MASK;
2001                 bytes = PAGE_CACHE_SIZE - ofs;
2002                 do_pages = 1;
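                /*
                 * For example, with 4096-byte pages, a pos of 0x1234 gives
                 * idx 1, ofs 0x234, and bytes 0xdcc, i.e. the remainder of
                 * the page.
                 */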
2003                 if (nr_pages > 1) {
2004                         vcn = pos >> vol->cluster_size_bits;
2005                         if (vcn != last_vcn) {
2006                                 last_vcn = vcn;
2007                                 /*
2008                                  * Get the lcn of the vcn the write is in.  If
2009                                  * it is a hole, need to lock down all pages in
2010                                  * the cluster.
2011                                  */
2012                                 down_read(&ni->runlist.lock);
2013                                 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
2014                                                 vol->cluster_size_bits, FALSE);
2015                                 up_read(&ni->runlist.lock);
2016                                 if (unlikely(lcn < LCN_HOLE)) {
2017                                         status = -EIO;
2018                                         if (lcn == LCN_ENOMEM)
2019                                                 status = -ENOMEM;
2020                                         else
2021                                                 ntfs_error(vol->sb, "Cannot "
2022                                                         "perform write to "
2023                                                         "inode 0x%lx, "
2024                                                         "attribute type 0x%x, "
2025                                                         "because the attribute "
2026                                                         "is corrupt.",
2027                                                         vi->i_ino, (unsigned)
2028                                                         le32_to_cpu(ni->type));
2029                                         break;
2030                                 }
2031                                 if (lcn == LCN_HOLE) {
2032                                         start_idx = (pos & ~(s64)
2033                                                         vol->cluster_size_mask)
2034                                                         >> PAGE_CACHE_SHIFT;
2035                                         bytes = vol->cluster_size - (pos &
2036                                                         vol->cluster_size_mask);
2037                                         do_pages = nr_pages;
2038                                 }
2039                         }
2040                 }
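                /*
                 * For example, with 8192-byte clusters and 4096-byte pages,
                 * a write at pos 0x2500 that hits a hole locks down both
                 * pages of the cluster: start_idx becomes 2, bytes 0x1b00
                 * (the rest of the cluster), and do_pages 2.
                 */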
2041                 if (bytes > count)
2042                         bytes = count;
2043                 /*
2044                  * Bring in the user page(s) that we will copy from _first_.
2045                  * Otherwise there is a nasty deadlock on copying from the same
2046                  * page(s) as we are writing to, without it/them being marked
2047                  * up-to-date.  Note, at present there is nothing to stop the
2048                  * pages being swapped out between us bringing them into memory
2049                  * and doing the actual copying.
2050                  */
2051                 if (likely(nr_segs == 1))
2052                         ntfs_fault_in_pages_readable(buf, bytes);
2053                 else
2054                         ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
2055                 /* Get and lock @do_pages starting at index @start_idx. */
2056                 status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
2057                                 pages, &cached_page, &lru_pvec);
2058                 if (unlikely(status))
2059                         break;
2060                 /*
2061                  * For non-resident attributes, we need to fill any holes with
                 * actual clusters and ensure all buffers are mapped.  We also
2063                  * need to bring uptodate any buffers that are only partially
2064                  * being written to.
2065                  */
2066                 if (NInoNonResident(ni)) {
2067                         status = ntfs_prepare_pages_for_non_resident_write(
2068                                         pages, do_pages, pos, bytes);
2069                         if (unlikely(status)) {
2070                                 loff_t i_size;
2071
2072                                 do {
2073                                         unlock_page(pages[--do_pages]);
2074                                         page_cache_release(pages[do_pages]);
2075                                 } while (do_pages);
2076                                 /*
2077                                  * The write preparation may have instantiated
2078                                  * allocated space outside i_size.  Trim this
2079                                  * off again.  We can ignore any errors in this
                                 * case as we will just be wasting a bit of
2081                                  * allocated space, which is not a disaster.
2082                                  */
2083                                 i_size = i_size_read(vi);
2084                                 if (pos + bytes > i_size)
2085                                         vmtruncate(vi, i_size);
2086                                 break;
2087                         }
2088                 }
2089                 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
2090                 if (likely(nr_segs == 1)) {
2091                         copied = ntfs_copy_from_user(pages + u, do_pages - u,
2092                                         ofs, buf, bytes);
2093                         buf += copied;
2094                 } else
2095                         copied = ntfs_copy_from_user_iovec(pages + u,
2096                                         do_pages - u, ofs, &iov, &iov_ofs,
2097                                         bytes);
2098                 ntfs_flush_dcache_pages(pages + u, do_pages - u);
2099                 status = ntfs_commit_pages_after_write(pages, do_pages, pos,
2100                                 bytes);
2101                 if (likely(!status)) {
2102                         written += copied;
2103                         count -= copied;
2104                         pos += copied;
2105                         if (unlikely(copied != bytes))
2106                                 status = -EFAULT;
2107                 }
2108                 do {
2109                         unlock_page(pages[--do_pages]);
2110                         mark_page_accessed(pages[do_pages]);
2111                         page_cache_release(pages[do_pages]);
2112                 } while (do_pages);
2113                 if (unlikely(status))
2114                         break;
2115                 balance_dirty_pages_ratelimited(mapping);
2116                 cond_resched();
2117         } while (count);
2118 err_out:
2119         *ppos = pos;
2120         if (cached_page)
2121                 page_cache_release(cached_page);
2122         /* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
2123         if (likely(!status)) {
2124                 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
2125                         if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
2126                                 status = generic_osync_inode(vi, mapping,
2127                                                 OSYNC_METADATA|OSYNC_DATA);
2128                 }
2129         }
2130         pagevec_lru_add(&lru_pvec);
2131         ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
2132                         written ? "written" : "status", (unsigned long)written,
2133                         (long)status);
2134         return written ? written : status;
2135 }
2136
2137 /**
 * ntfs_file_aio_write_nolock - write to a file with ->i_sem held by the caller
2139  */
2140 static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
2141                 const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
2142 {
2143         struct file *file = iocb->ki_filp;
2144         struct address_space *mapping = file->f_mapping;
2145         struct inode *inode = mapping->host;
2146         loff_t pos;
2147         unsigned long seg;
2148         size_t count;           /* after file limit checks */
2149         ssize_t written, err;
2150
2151         count = 0;
2152         for (seg = 0; seg < nr_segs; seg++) {
2153                 const struct iovec *iv = &iov[seg];
2154                 /*
2155                  * If any segment has a negative length, or the cumulative
2156                  * length ever wraps negative then return -EINVAL.
2157                  */
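                /*
                 * For example, on a 32-bit machine an iov_len of 0x80000000
                 * makes the OR below negative when cast to ssize_t, as does
                 * a cumulative count that wraps past SSIZE_MAX.
                 */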
2158                 count += iv->iov_len;
2159                 if (unlikely((ssize_t)(count|iv->iov_len) < 0))
2160                         return -EINVAL;
2161                 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
2162                         continue;
2163                 if (!seg)
2164                         return -EFAULT;
2165                 nr_segs = seg;
2166                 count -= iv->iov_len;   /* This segment is no good */
2167                 break;
2168         }
2169         pos = *ppos;
2170         vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2171         /* We can write back this queue in page reclaim. */
2172         current->backing_dev_info = mapping->backing_dev_info;
2173         written = 0;
2174         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2175         if (err)
2176                 goto out;
2177         if (!count)
2178                 goto out;
2179         err = remove_suid(file->f_dentry);
2180         if (err)
2181                 goto out;
2182         inode_update_time(inode, 1);
2183         written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
2184                         count);
2185 out:
2186         current->backing_dev_info = NULL;
2187         return written ? written : err;
2188 }
2189
2190 /**
 * ntfs_file_aio_write - write data to a file taking ->i_sem itself
2192  */
2193 static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
2194                 size_t count, loff_t pos)
2195 {
2196         struct file *file = iocb->ki_filp;
2197         struct address_space *mapping = file->f_mapping;
2198         struct inode *inode = mapping->host;
2199         ssize_t ret;
2200         struct iovec local_iov = { .iov_base = (void __user *)buf,
2201                                    .iov_len = count };
2202
2203         BUG_ON(iocb->ki_pos != pos);
2204
2205         down(&inode->i_sem);
2206         ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
2207         up(&inode->i_sem);
2208         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2209                 int err = sync_page_range(inode, mapping, pos, ret);
2210                 if (err < 0)
2211                         ret = err;
2212         }
2213         return ret;
2214 }
2215
2216 /**
2217  * ntfs_file_writev -
2218  *
2219  * Basically the same as generic_file_writev() except that it ends up calling
2220  * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
2221  */
2222 static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
2223                 unsigned long nr_segs, loff_t *ppos)
2224 {
2225         struct address_space *mapping = file->f_mapping;
2226         struct inode *inode = mapping->host;
2227         struct kiocb kiocb;
2228         ssize_t ret;
2229
2230         down(&inode->i_sem);
2231         init_sync_kiocb(&kiocb, file);
2232         ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2233         if (ret == -EIOCBQUEUED)
2234                 ret = wait_on_sync_kiocb(&kiocb);
2235         up(&inode->i_sem);
2236         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2237                 int err = sync_page_range(inode, mapping, *ppos - ret, ret);
2238                 if (err < 0)
2239                         ret = err;
2240         }
2241         return ret;
2242 }
2243
2244 /**
2245  * ntfs_file_write - simple wrapper for ntfs_file_writev()
2246  */
2247 static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
2248                 size_t count, loff_t *ppos)
2249 {
2250         struct iovec local_iov = { .iov_base = (void __user *)buf,
2251                                    .iov_len = count };
2252
2253         return ntfs_file_writev(file, &local_iov, 1, ppos);
2254 }
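
/*
 * Illustrative note: the write entry points above are wired up through the
 * ntfs_file_ops structure below, so e.g. a write(2) system call on an ntfs
 * file reaches ntfs_file_write() via the VFS and from there takes the
 * buffered write path implemented above.
 */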
2255
2256 /**
2257  * ntfs_file_fsync - sync a file to disk
2258  * @filp:       file to be synced
2259  * @dentry:     dentry describing the file to sync
2260  * @datasync:   if non-zero only flush user data and not metadata
2261  *
2262  * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
2263  * system calls.  This function is inspired by fs/buffer.c::file_fsync().
2264  *
2265  * If @datasync is false, write the mft record and all associated extent mft
2266  * records as well as the $DATA attribute and then sync the block device.
2267  *
2268  * If @datasync is true and the attribute is non-resident, we skip the writing
2269  * of the mft record and all associated extent mft records (this might still
2270  * happen due to the write_inode_now() call).
2271  *
2272  * Also, if @datasync is true, we do not wait on the inode to be written out
2273  * but we always wait on the page cache pages to be written out.
2274  *
2275  * Note: In the past @filp could be NULL so we ignore it as we don't need it
2276  * anyway.
2277  *
2278  * Locking: Caller must hold i_sem on the inode.
2279  *
2280  * TODO: We should probably also write all attribute/index inodes associated
2281  * with this inode but since we have no simple way of getting to them we ignore
2282  * this problem for now.
2283  */
2284 static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
2285                 int datasync)
2286 {
2287         struct inode *vi = dentry->d_inode;
2288         int err, ret = 0;
2289
2290         ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
2291         BUG_ON(S_ISDIR(vi->i_mode));
2292         if (!datasync || !NInoNonResident(NTFS_I(vi)))
2293                 ret = ntfs_write_inode(vi, 1);
2294         write_inode_now(vi, !datasync);
2295         /*
2296          * NOTE: If we were to use mapping->private_list (see ext2 and
2297          * fs/buffer.c) for dirty blocks then we could optimize the below to be
2298          * sync_mapping_buffers(vi->i_mapping).
2299          */
2300         err = sync_blockdev(vi->i_sb->s_bdev);
2301         if (unlikely(err && !ret))
2302                 ret = err;
2303         if (likely(!ret))
2304                 ntfs_debug("Done.");
2305         else
2306                 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
2307                                 "%u.", datasync ? "data" : "", vi->i_ino, -ret);
2308         return ret;
2309 }
2310
2311 #endif /* NTFS_RW */
2312
2313 struct file_operations ntfs_file_ops = {
2314         .llseek         = generic_file_llseek,   /* Seek inside file. */
2315         .read           = generic_file_read,     /* Read from file. */
2316         .aio_read       = generic_file_aio_read, /* Async read from file. */
2317         .readv          = generic_file_readv,    /* Read from file. */
2318 #ifdef NTFS_RW
2319         .write          = ntfs_file_write,       /* Write to file. */
2320         .aio_write      = ntfs_file_aio_write,   /* Async write to file. */
2321         .writev         = ntfs_file_writev,      /* Write to file. */
2322         /*.release      = ,*/                    /* Last file is closed.  See
2323                                                     fs/ext2/file.c::
2324                                                     ext2_release_file() for
2325                                                     how to use this to discard
2326                                                     preallocated space for
2327                                                     write opened files. */
2328         .fsync          = ntfs_file_fsync,       /* Sync a file to disk. */
2329         /*.aio_fsync    = ,*/                    /* Sync all outstanding async
2330                                                     i/o operations on a
2331                                                     kiocb. */
2332 #endif /* NTFS_RW */
2333         /*.ioctl        = ,*/                    /* Perform function on the
2334                                                     mounted filesystem. */
2335         .mmap           = generic_file_mmap,     /* Mmap file. */
2336         .open           = ntfs_file_open,        /* Open file. */
2337         .sendfile       = generic_file_sendfile, /* Zero-copy data send with
2338                                                     the data source being on
2339                                                     the ntfs partition.  We do
2340                                                     not need to care about the
2341                                                     data destination. */
2342         /*.sendpage     = ,*/                    /* Zero-copy data send with
2343                                                     the data destination being
2344                                                     on the ntfs partition.  We
2345                                                     do not need to care about
2346                                                     the data source. */
2347 };
2348
2349 struct inode_operations ntfs_file_inode_ops = {
2350 #ifdef NTFS_RW
2351         .truncate       = ntfs_truncate_vfs,
2352         .setattr        = ntfs_setattr,
2353 #endif /* NTFS_RW */
2354 };
2355
2356 struct file_operations ntfs_empty_file_ops = {};
2357
2358 struct inode_operations ntfs_empty_inode_ops = {};