fs/fscache/page.c
/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

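        /* the waitqueue for bit 0 of cookie->flags is woken by the
         * wake_up_bit() calls in fscache_end_page_write() and
         * __fscache_maybe_release_page() */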
        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the slow-work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
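                /* wake up anyone waiting in __fscache_wait_on_page_write() */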
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_set_op_state(op, "CallFS");
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                fscache_set_op_state(op, "Done");
                if (ret < 0)
                        fscache_abort_object(object);
        }

        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, NULL);
        fscache_operation_init_slow(op, fscache_attr_changed_op);
        op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
        fscache_set_op_name(op, "Attr");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
        struct fscache_retrieval *op =
                container_of(work, struct fscache_retrieval, op.fast_work);
        unsigned long start;

        _enter("{OP%x}", op->op.debug_id);

        start = jiffies;
        op->op.processor(&op->op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and set it up; the caller submits it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
        INIT_LIST_HEAD(&op->to_do);
        fscache_set_op_name(&op->op, "Retr");
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   -ENODATA   - no data available in the backing object for this block
 *   0          - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        fscache_set_op_name(&op->op, "RetrRA1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
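                /* a block was allocated but it holds no data yet, so report
                 * -ENODATA and leave the netfs to read the page from the
                 * server */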
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM    - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS   - no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA   - no data available in the backing object for some or all of
 *                the pages
 *   0          - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrRAN");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   0          - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrAL1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        fscache_set_op_state(&op->op, "GetPage");

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        if (page) {
                radix_tree_tag_set(&cookie->stores, page->index,
                                   FSCACHE_COOKIE_STORING_TAG);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_PENDING_TAG);
        }

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        if (page) {
                fscache_set_op_state(&op->op, "Store");
                fscache_stat(&fscache_n_store_pages);
                fscache_stat(&fscache_n_cop_write_page);
                ret = object->cache->ops->write_page(op, page);
                fscache_stat_d(&fscache_n_cop_write_page);
                fscache_set_op_state(&op->op, "EndWrite");
                fscache_end_page_write(object, page);
                if (ret < 0) {
                        fscache_set_op_state(&op->op, "Abort");
                        fscache_abort_object(object);
                } else {
                        fscache_enqueue_operation(&op->op);
                }
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_release_write_op);
        fscache_operation_init_slow(&op->op, fscache_write_op);
        op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
        fscache_set_op_name(&op->op, "Write1");

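        /* preallocate radix tree nodes so that the insertion under the
         * spinlocks below doesn't have to allocate memory */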
        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
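        /* take a reference on the page whilst it's queued for storage; it's
         * dropped again once the write completes or the store is cancelled */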
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the slow work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

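        /* fscache_n_marks is only available when FS-Cache statistics are
         * compiled in */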
#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);