[safe/jmp/linux-2.6] fs/fs-writeback.c
index 0f62957..ea8592b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -42,10 +42,10 @@ struct wb_writeback_args {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
-       int for_kupdate:1;
-       int range_cyclic:1;
-       int for_background:1;
-       int sb_pinned:1;
+       unsigned int for_kupdate:1;
+       unsigned int range_cyclic:1;
+       unsigned int for_background:1;
+       unsigned int sb_pinned:1;
 };
 
 /*
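
The signedness change in this hunk is more than cosmetic: whether a plain "int" bitfield is signed is implementation-defined, and gcc makes it signed, so a 1-bit field can only hold 0 and -1. Assigning 1 stores -1, and a later test against 1 quietly fails. A minimal standalone sketch of the pitfall (the struct names are illustrative, not kernel code):

    #include <stdio.h>

    struct signed_flags {
            int for_kupdate:1;              /* typically signed: holds 0 or -1 */
    };

    struct unsigned_flags {
            unsigned int for_kupdate:1;     /* always holds 0 or 1 */
    };

    int main(void)
    {
            struct signed_flags s = { .for_kupdate = 1 };
            struct unsigned_flags u = { .for_kupdate = 1 };

            /* With a signed 1-bit field this prints -1 and "no". */
            printf("signed:   %d, == 1? %s\n", s.for_kupdate,
                   s.for_kupdate == 1 ? "yes" : "no");
            printf("unsigned: %d, == 1? %s\n", u.for_kupdate,
                   u.for_kupdate == 1 ? "yes" : "no");
            return 0;
    }
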
@@ -193,7 +193,8 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
 }
 
 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
-                                struct wb_writeback_args *args)
+                                struct wb_writeback_args *args,
+                                int wait)
 {
        struct bdi_work *work;
 
@@ -205,6 +206,8 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
        if (work) {
                bdi_work_init(work, args);
                bdi_queue_work(bdi, work);
+               if (wait)
+                       bdi_wait_on_work_clear(work);
        } else {
                struct bdi_writeback *wb = &bdi->wb;
 
@@ -279,7 +282,7 @@ void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
                args.for_background = 1;
        }
 
-       bdi_alloc_queue_work(bdi, &args);
+       bdi_alloc_queue_work(bdi, &args, sb_locked);
 }
 
 /*
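
The new "wait" argument lets bdi_alloc_queue_work() either fire and forget (wait == 0, as bdi_writeback_all() does at the bottom of this diff) or block until the flusher thread has cleared the work item, which bdi_start_writeback() now requests when the caller holds the superblock pinned (sb_locked). A rough userspace analogue of this queue-then-optionally-wait pattern, assuming pthreads (none of these names are kernel APIs):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct work {
            pthread_mutex_t lock;
            pthread_cond_t  done_cv;
            int             done;
    };

    static void *worker(void *arg)
    {
            struct work *w = arg;

            usleep(1000);                   /* pretend to write back pages */
            pthread_mutex_lock(&w->lock);
            w->done = 1;                    /* analogue of wb_clear_pending() */
            pthread_cond_signal(&w->done_cv);
            pthread_mutex_unlock(&w->lock);
            return NULL;
    }

    static void alloc_queue_work(struct work *w, int wait)
    {
            pthread_t tid;

            pthread_create(&tid, NULL, worker, w);  /* ~bdi_queue_work() */
            if (wait) {                     /* ~bdi_wait_on_work_clear() */
                    pthread_mutex_lock(&w->lock);
                    while (!w->done)
                            pthread_cond_wait(&w->done_cv, &w->lock);
                    pthread_mutex_unlock(&w->lock);
            }
            pthread_detach(tid);
    }

    int main(void)
    {
            struct work w = {
                    .lock    = PTHREAD_MUTEX_INITIALIZER,
                    .done_cv = PTHREAD_COND_INITIALIZER,
            };

            alloc_queue_work(&w, 1);        /* wait == 1: synchronous */
            printf("work completed before return\n");
            return 0;
    }

Built with "cc demo.c -lpthread", the program only prints once the worker has finished, mirroring the synchronous case.
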
@@ -406,11 +409,11 @@ static void inode_wait_for_writeback(struct inode *inode)
        wait_queue_head_t *wqh;
 
        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
-       do {
+       while (inode->i_state & I_SYNC) {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
-       } while (inode->i_state & I_SYNC);
+       }
 }
 
 /*
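
Switching from do/while to while changes the semantics from "sleep at least once" to "sleep only while I_SYNC is set": an inode that is not under writeback no longer pays a pointless unlock/wait/lock round trip. A toy illustration of the difference between the two loop forms:

    #include <stdio.h>

    int main(void)
    {
            int i_sync = 0;                 /* I_SYNC already clear on entry */
            int sleeps_old = 0, sleeps_new = 0;

            do {                            /* old form: always runs once */
                    sleeps_old++;           /* stands in for unlock/wait/lock */
            } while (i_sync);

            while (i_sync)                  /* new form: tests the bit first */
                    sleeps_new++;

            printf("old: %d sleep(s), new: %d sleep(s)\n", sleeps_old, sleeps_new);
            return 0;
    }
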
@@ -909,6 +912,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 
        while ((work = get_next_work_item(bdi, wb)) != NULL) {
                struct wb_writeback_args args = work->args;
+               int post_clear;
 
                /*
                 * Override sync mode, in case we must wait for completion
@@ -916,11 +920,13 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
                if (force_wait)
                        work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
 
+               post_clear = args.sync_mode == WB_SYNC_ALL || args.sb_pinned;
+
                /*
                 * If this isn't a data integrity operation, just notify
                 * that we have seen this work and we are now starting it.
                 */
-               if (args.sync_mode == WB_SYNC_NONE)
+               if (!post_clear)
                        wb_clear_pending(wb, work);
 
                wrote += wb_writeback(wb, &args);
@@ -929,7 +935,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
                 * This is a data integrity writeback, so only do the
                 * notification when we have completed the work.
                 */
-               if (args.sync_mode == WB_SYNC_ALL)
+               if (post_clear)
                        wb_clear_pending(wb, work);
        }
 
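
The post_clear test is easy to get wrong: the enum constant must be compared against args.sync_mode, because a bare "WB_SYNC_ALL || args.sb_pinned" is always true (WB_SYNC_ALL is the nonzero enum value 1), which would defer the wb_clear_pending() notification for every work item. A small demonstration, using the same enum values as the kernel:

    #include <stdio.h>

    enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

    int main(void)
    {
            enum writeback_sync_modes mode = WB_SYNC_NONE;
            int sb_pinned = 0;

            /* Bare constant: evaluates to 1 no matter what mode is. */
            printf("WB_SYNC_ALL || sb_pinned         -> %d\n",
                   WB_SYNC_ALL || sb_pinned);
            /* Intended test: true only for data integrity writeback. */
            printf("mode == WB_SYNC_ALL || sb_pinned -> %d\n",
                   mode == WB_SYNC_ALL || sb_pinned);
            return 0;
    }
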
@@ -972,8 +978,13 @@ int bdi_writeback_task(struct bdi_writeback *wb)
                if (dirty_writeback_interval) {
                        wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
                        schedule_timeout_interruptible(wait_jiffies);
-               } else
-                       schedule();
+               } else {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       if (list_empty_careful(&wb->bdi->work_list) &&
+                           !kthread_should_stop())
+                               schedule();
+                       __set_current_state(TASK_RUNNING);
+               }
 
                try_to_freeze();
        }
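
The reworked idle path follows the kernel's standard sleep protocol: set the task state to TASK_INTERRUPTIBLE first, then re-test the wait condition, then call schedule(). A wakeup that arrives between the test and schedule() simply puts the task back in TASK_RUNNING, so it cannot be lost, whereas the old bare schedule() could miss a wakeup delivered just after an unlocked check of the work list. The closest portable analogue is re-testing the predicate under a lock before waiting, sketched here with pthreads (all names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  more_work = PTHREAD_COND_INITIALIZER;
    static int work_pending;
    static int should_stop;

    static void *flusher(void *arg)
    {
            pthread_mutex_lock(&lock);
            while (!should_stop) {
                    /* predicate is re-tested under the lock, so a wakeup
                     * cannot slip in between the test and the wait */
                    while (!work_pending && !should_stop)
                            pthread_cond_wait(&more_work, &lock);
                    work_pending = 0;       /* consume the queued work */
            }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    static void queue_work_and_wake(void)
    {
            pthread_mutex_lock(&lock);
            work_pending = 1;
            pthread_cond_signal(&more_work);        /* ~wake_up() */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pthread_t tid;

            pthread_create(&tid, NULL, flusher, NULL);
            queue_work_and_wake();

            pthread_mutex_lock(&lock);
            should_stop = 1;                        /* ~kthread_stop() */
            pthread_cond_signal(&more_work);
            pthread_mutex_unlock(&lock);

            pthread_join(tid, NULL);
            puts("flusher exited cleanly");
            return 0;
    }
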
@@ -1000,7 +1011,7 @@ static void bdi_writeback_all(struct super_block *sb, long nr_pages)
                if (!bdi_has_dirty_io(bdi))
                        continue;
 
-               bdi_alloc_queue_work(bdi, &args);
+               bdi_alloc_queue_work(bdi, &args, 0);
        }
 
        rcu_read_unlock();