knfsd: allow cache_register to return error on failure

/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define  RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
        time_t now = get_seconds();
        h->next = NULL;
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head **head,  **hp;
        struct cache_head *new = NULL;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }
        new->next = *head;
        *head = new;
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        return new;
}
EXPORT_SYMBOL(sunrpc_cache_lookup);


static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = get_seconds();
        return !test_and_set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                        struct cache_detail *detail, int new)
{
        if (new)
                cache_revisit_request(head);
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                queue_loose(detail, head);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head **head;
        struct cache_head *tmp;
        int is_new;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        is_new = cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail, is_new);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);
        head = &detail->hash_table[hash];

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        tmp->next = *head;
        *head = tmp;
        detail->entries++;
        cache_get(tmp);
        is_new = cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail, is_new);
        cache_fresh_unlocked(old, detail, 0);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL(sunrpc_cache_update);
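
/*
 * Sketch of a typical caller of the two exported functions above (the
 * cache instance "mycache_detail", 'key', 'answer' and 'hash' are
 * illustrative, not defined in this file):
 *
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup(&mycache_detail, &key->h, hash);
 *	if (ch == NULL)
 *		return -ENOMEM;		// allocation failed
 *	// ... when an authoritative answer arrives, fold it in; this
 *	// consumes the reference on 'ch' and returns a referenced,
 *	// freshened entry (or NULL on allocation failure):
 *	ch = sunrpc_cache_update(&mycache_detail, &answer->h, ch, hash);
 */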

static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise the reference is
 * dropped with cache_put() and the return value is
 * -EAGAIN if upcall is pending,
 * -ETIMEDOUT if upcall failed and should be retried,
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                    struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        if (!test_bit(CACHE_VALID, &h->flags) ||
            h->expiry_time < get_seconds())
                rv = -EAGAIN;
        else if (detail->flush_time > h->last_refresh)
                rv = -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        rv = -ENOENT;
                else rv = 0;
        }

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = get_seconds() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN || age > refresh_age/2) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                clear_bit(CACHE_PENDING, &h->flags);
                                if (rv == -EAGAIN) {
                                        set_bit(CACHE_NEGATIVE, &h->flags);
                                        cache_fresh_unlocked(h, detail,
                                             cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
                                        rv = -ENOENT;
                                }
                                break;

                        case -EAGAIN:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN)
                if (cache_defer_req(rqstp, h) != 0)
                        rv = -ETIMEDOUT;

        if (rv)
                cache_put(h, detail);
        return rv;
}
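
/*
 * A server-side consumer might map the cache_check() results roughly
 * like this (sketch only; the SVC_* dispositions and request handle
 * follow the svcauth callers, not code in this file):
 *
 *	switch (cache_check(&mycache_detail, &item->h, &rqstp->rq_chandle)) {
 *	case 0:			// valid entry: use it, cache_put() when done
 *		break;
 *	case -EAGAIN:		// upcall pending and request deferred
 *	case -ETIMEDOUT:	// deferral failed: drop, let the client retry
 *		return SVC_DROP;
 *	case -ENOENT:		// negative entry
 *		return SVC_DENIED;
 *	}
 */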

/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry time is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
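
/*
 * For example (timings illustrative): a scan pushes nextcheck 30
 * minutes out, but while walking a chain cache_clean pulls it back for
 * any entry that expires sooner:
 *
 *	if (current_detail->nextcheck > ch->expiry_time)
 *		current_detail->nextcheck = ch->expiry_time + 1;
 *
 * so a table holding an entry that expires in 60 seconds is rescanned
 * shortly after that expiry rather than half an hour later.
 */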

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static const struct file_operations cache_file_operations;
static const struct file_operations content_file_operations;
static const struct file_operations cache_flush_operations;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

static void remove_cache_proc_entries(struct cache_detail *cd)
{
        if (cd->proc_ent == NULL)
                return;
        if (cd->flush_ent)
                remove_proc_entry("flush", cd->proc_ent);
        if (cd->channel_ent)
                remove_proc_entry("channel", cd->proc_ent);
        if (cd->content_ent)
                remove_proc_entry("content", cd->proc_ent);
        cd->proc_ent = NULL;
        remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
        struct proc_dir_entry *p;

        cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
        if (cd->proc_ent == NULL)
                goto out_nomem;
        cd->proc_ent->owner = cd->owner;
        cd->channel_ent = cd->content_ent = NULL;

        p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
        cd->flush_ent = p;
        if (p == NULL)
                goto out_nomem;
        p->proc_fops = &cache_flush_operations;
        p->owner = cd->owner;
        p->data = cd;

        if (cd->cache_request || cd->cache_parse) {
                p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                      cd->proc_ent);
                cd->channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
                p->proc_fops = &cache_file_operations;
                p->owner = cd->owner;
                p->data = cd;
        }
        if (cd->cache_show) {
                p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
                                      cd->proc_ent);
                cd->content_ent = p;
                if (p == NULL)
                        goto out_nomem;
                p->proc_fops = &content_file_operations;
                p->owner = cd->owner;
                p->data = cd;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
        return 0;
}
#endif

int cache_register(struct cache_detail *cd)
{
        int ret;

        ret = create_cache_proc_entries(cd);
        if (ret)
                return ret;
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
        return 0;
}
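
/*
 * Since cache_register() can now fail (returning -ENOMEM from proc
 * entry creation), callers should propagate the error; sketch of a
 * hypothetical user's module init:
 *
 *	static int __init mymodule_init(void)
 *	{
 *		int err = cache_register(&mycache_detail);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 */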

void cache_unregister(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        remove_cache_proc_entries(cd);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* cache_clean tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > get_seconds())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = get_seconds()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               current_detail->hash_table[current_index] == NULL)
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = & current_detail->hash_table[current_index];
                ch = *cp;
                for (; ch; cp= & ch->next, ch= *cp) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (ch->expiry_time >= get_seconds()
                            && ch->last_refresh >= current_detail->flush_time
                                )
                                continue;
                        if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
                                queue_loose(current_detail, ch);

                        if (atomic_read(&ch->ref.refcount) == 1)
                                break;
                }
                if (ch) {
                        *cp = ch->next;
                        ch->next = NULL;
                        current_detail->entries--;
                        rv = 1;
                }
                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index ++;
                spin_unlock(&cache_list_lock);
                if (ch)
                        cache_put(ch, d);
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = 30*HZ;

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = get_seconds();
        cache_flush();
        detail->flush_time = 1;
}


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
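
/*
 * The deferred form is embedded in a transport-owned structure; a
 * sketch (all names here are invented for illustration, not part of
 * this file):
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		// ... enough state to replay the request later
 *	};
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred_req *dr = kmalloc(sizeof(*dr), GFP_KERNEL);
 *		if (!dr)
 *			return NULL;	// cache_defer_req maps this to -ETIMEDOUT
 *		dr->handle.revisit = my_revisit;
 *		return &dr->handle;
 *	}
 */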

#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        int hash = DFR_HASH(item);

        if (cache_defer_cnt >= DFR_MAX) {
                /* too much in the cache, randomly drop this one,
                 * or continue and drop the oldest below
                 */
                if (net_random()&1)
                        return -ETIMEDOUT;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return -ETIMEDOUT;

        dreq->item = item;
        dreq->recv_time = get_seconds();

        spin_lock(&cache_defer_lock);

        list_add(&dreq->recent, &cache_defer_list);

        if (cache_defer_hash[hash].next == NULL)
                INIT_LIST_HEAD(&cache_defer_hash[hash]);
        list_add(&dreq->hash, &cache_defer_hash[hash]);

        /* it is in, now maybe clean up */
        dreq = NULL;
        if (++cache_defer_cnt > DFR_MAX) {
                dreq = list_entry(cache_defer_list.prev,
                                  struct cache_deferred_req, recent);
                list_del(&dreq->recent);
                list_del(&dreq->hash);
                cache_defer_cnt--;
        }
        spin_unlock(&cache_defer_lock);

        if (dreq) {
                /* there was one too many */
                dreq->revisit(dreq, 1);
        }
        if (!test_bit(CACHE_PENDING, &item->flags)) {
                /* must have just been validated... */
                cache_revisit_request(item);
        }
        return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;

        struct list_head *lp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        lp = cache_defer_hash[hash].next;
        if (lp) {
                while (lp != &cache_defer_hash[hash]) {
                        dreq = list_entry(lp, struct cache_deferred_req, hash);
                        lp = lp->next;
                        if (dreq->item == item) {
                                list_del(&dreq->hash);
                                list_move(&dreq->recent, &pending);
                                cache_defer_cnt--;
                        }
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        list_del(&dreq->hash);
                        list_move(&dreq->recent, &pending);
                        cache_defer_cnt--;
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
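
/*
 * A user-space daemon's side of the conversation, sketched (cache name
 * and buffer handling are illustrative only):
 *
 *	int fd = open("/proc/net/rpc/mycache/channel", O_RDWR);
 *
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));	// one request, or block
 *		if (n <= 0)
 *			continue;
 *		// ... look up the answer, then write one reply record:
 *		write(fd, reply, strlen(reply));
 *	}
 */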

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
                                      * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&queue_io_mutex);
                BUG_ON(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        BUG_ON(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&queue_io_mutex);
        return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_mutex */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
            loff_t *ppos)
{
        int err;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        if (count == 0)
                return 0;
        if (count >= sizeof(write_buf))
                return -EINVAL;

        mutex_lock(&queue_io_mutex);

        if (copy_from_user(write_buf, buf, count)) {
                mutex_unlock(&queue_io_mutex);
                return -EFAULT;
        }
        write_buf[count] = '\0';
        if (cd->cache_parse)
                err = cd->cache_parse(cd, write_buf, count);
        else
                err = -EINVAL;

        mutex_unlock(&queue_io_mutex);
        return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq= &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
            unsigned int cmd, unsigned long arg)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;
        struct cache_detail *cd = PDE(ino)->data;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq= &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
        struct cache_reader *rp = NULL;

        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                struct cache_detail *cd = PDE(inode)->data;

                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp)
                        return -ENOMEM;
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_detail *cd = PDE(inode)->data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq= &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = get_seconds();
                atomic_dec(&cd->readers);
        }
        return 0;
}


static const struct file_operations cache_file_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read,
        .write          = cache_write,
        .poll           = cache_poll,
        .ioctl          = cache_ioctl, /* for FIONREAD */
        .open           = cache_open,
        .release        = cache_release,
};


static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq;
        spin_lock(&queue_lock);
        list_for_each_entry(cq, &detail->queue, list)
                if (!cq->reader) {
                        struct cache_request *cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (cr->readers != 0)
                                continue;
                        list_del(&cr->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(cr->item, detail);
                        kfree(cr->buf);
                        kfree(cr);
                        return;
                }
        spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x.
 * Record is terminated with newline.
 */
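
/*
 * For example, qword_add() on the string "net group" emits
 * "net\040group " - the space is quoted as octal \040 and a single
 * space is appended as the field separator.  (Input invented for
 * illustration.)
 */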

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        char c;

        if (len < 0) return;

        while ((c=*str++) && len)
                switch(c) {
                case ' ':
                case '\t':
                case '\n':
                case '\\':
                        if (len >= 4) {
                                *bp++ = '\\';
                                *bp++ = '0' + ((c & 0300)>>6);
                                *bp++ = '0' + ((c & 0070)>>3);
                                *bp++ = '0' + ((c & 0007)>>0);
                        }
                        len -= 4;
                        break;
                default:
                        *bp++ = c;
                        len--;
                }
        if (c || len <1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        unsigned char c = *buf++;
                        *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
                        *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len<1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
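
/*
 * For example, qword_addhex() on the two bytes {0xde, 0xad} emits
 * "\xdead " - a "\x" prefix, two lowercase hex digits per byte and a
 * trailing field separator.  (Input invented for illustration.)
 */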

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail);
        }
}

/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
        char *buf;
        struct cache_request *crq;
        char *bp;
        int len;

        if (detail->cache_request == NULL)
                return -EINVAL;

        if (atomic_read(&detail->readers) == 0 &&
            detail->last_close < get_seconds() - 30) {
                warn_no_listener(detail);
                return -EINVAL;
        }

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        bp = buf; len = PAGE_SIZE;

        detail->cache_request(detail, h, &bp, &len);

        if (len < 0) {
                kfree(buf);
                kfree(crq);
                return -EAGAIN;
        }
        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = PAGE_SIZE - len;
        crq->readers = 0;
        spin_lock(&queue_lock);
        list_add_tail(&crq->q.list, &detail->queue);
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        return 0;
}

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
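
/*
 * An invented example (the real field order is up to each cache's
 * ->cache_parse method): a daemon might write a record such as
 *
 *	\x6d796b6579 1200000000 mycontent\n
 *
 * where each field is plain text, \nnn octal-quoted or \xHEX-quoted,
 * and the record ends with a newline.
 */
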
#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
                        int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        bp++;
                        byte <<= 4;
                        byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        *dest++ = byte;
                        bp++;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp -'0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
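
/*
 * Decoding mirrors the quoting rules above.  Given the invented buffer
 * "a\040b \x6e6673 rest", successive qword_get() calls yield "a b"
 * and then "nfs", leaving *bpp pointing at "rest".
 */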

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
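
/*
 * The 64-bit seq_file position doubles as a cursor into the hash
 * table: the upper 32 bits select the hash chain and the lower 32
 * bits count entries within that chain, i.e. roughly
 *
 *	hash  = *pos >> 32;
 *	entry = *pos & ((1LL<<32) - 1);
 *
 * with position 0 reserved for the header (SEQ_START_TOKEN).
 */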

struct handle {
        struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        for (ch=cd->hash_table[hash]; ch; ch=ch->next)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while(hash < cd->hash_size &&
                cd->hash_table[hash]==NULL);
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return ch->next;
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               cd->hash_table[hash] == NULL) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = ((struct handle*)m->private)->cd;
        read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = ((struct handle*)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else
                cache_put(cp, cd);

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
        struct handle *han;
        struct cache_detail *cd = PDE(inode)->data;

        han = __seq_open_private(file, &cache_content_op, sizeof(*han));
        if (han == NULL)
                return -ENOMEM;

        han->cd = cd;
        return 0;
}

static const struct file_operations content_file_operations = {
        .open           = content_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

static ssize_t read_flush(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
        char tbuf[20];
        unsigned long p = *ppos;
        size_t len;

        sprintf(tbuf, "%lu\n", cd->flush_time);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void*)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
        char tbuf[20];
        char *ep;
        long flushtime;
        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        flushtime = simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        cd->flush_time = flushtime;
        cd->nextcheck = get_seconds();
        cache_flush();

        *ppos += count;
        return count;
}
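
/*
 * From user-space, writing a time in seconds to the flush file
 * discards every entry last refreshed before that time; e.g.
 * (illustrative cache name):
 *
 *	echo $(date +%s) > /proc/net/rpc/mycache/flush
 *
 * empties the cache of everything currently in it.
 */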

static const struct file_operations cache_flush_operations = {
        .open           = nonseekable_open,
        .read           = read_flush,
        .write          = write_flush,
};