sunrpc: fix memory leak in unix_gid cache.
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
        time_t now = get_seconds();
        h->next = NULL;
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head **head, **hp;
        struct cache_head *new = NULL;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }
        new->next = *head;
        *head = new;
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
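
/*
 * Example (illustrative sketch, not part of this file): a cache backend
 * typically wraps sunrpc_cache_lookup in a typed helper.  The entry type,
 * its embedded cache_head member 'h', and the hash function below are
 * hypothetical:
 *
 *      static struct mycache_entry *mycache_lookup(struct cache_detail *cd,
 *                                                  struct mycache_entry *key)
 *      {
 *              struct cache_head *ch;
 *              int hash = mycache_hash(key);   // hypothetical hash fn
 *
 *              ch = sunrpc_cache_lookup(cd, &key->h, hash);
 *              if (ch == NULL)
 *                      return NULL;            // allocation failed
 *              return container_of(ch, struct mycache_entry, h);
 *      }
 */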

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = get_seconds();
        return !test_and_set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                        struct cache_detail *detail, int new)
{
        if (new)
                cache_revisit_request(head);
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it.
         */
        struct cache_head **head;
        struct cache_head *tmp;
        int is_new;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        is_new = cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail, is_new);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);
        head = &detail->hash_table[hash];

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        tmp->next = *head;
        *head = tmp;
        detail->entries++;
        cache_get(tmp);
        is_new = cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail, is_new);
        cache_fresh_unlocked(old, detail, 0);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
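
/*
 * Example (illustrative sketch): callers pair the two helpers above.
 * After filling a template entry ('tmpl', hypothetical) with data and an
 * expiry_time from an upcall reply, the entry previously obtained from
 * sunrpc_cache_lookup is refreshed with sunrpc_cache_update, which either
 * updates it in place or hashes in a replacement:
 *
 *      ch = sunrpc_cache_update(cd, &tmpl.h, &found->h, hash);
 *      if (ch == NULL)
 *              return -ENOMEM; // our reference to 'found' was already put
 *      found = container_of(ch, struct mycache_entry, h);
 */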

static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags) ||
            h->expiry_time < get_seconds())
                return -EAGAIN;
        else if (detail->flush_time > h->last_refresh)
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else
                        return 0;
        }
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued, or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                    struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(detail, h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = get_seconds() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN || age > refresh_age/2) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                if (rv == -EAGAIN) {
                                        set_bit(CACHE_NEGATIVE, &h->flags);
                                        cache_fresh_unlocked(h, detail,
                                             cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
                                        rv = -ENOENT;
                                }
                                break;

                        case -EAGAIN:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (cache_defer_req(rqstp, h) == 0) {
                        /* Request is not deferred */
                        rv = cache_is_valid(detail, h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
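
/*
 * Example (illustrative sketch): a server-side consumer calls cache_check
 * with the request's cache_req handle so the request can be deferred while
 * the upcall is outstanding.  SVC_DROP/SVC_DENIED are the usual svcauth
 * dispositions; the surrounding handler is hypothetical:
 *
 *      switch (cache_check(cd, &item->h, &rqstp->rq_chandle)) {
 *      case 0:                         // valid; we still hold our reference
 *              break;
 *      case -EAGAIN:                   // deferred; revisited when filled
 *              return SVC_DROP;
 *      case -ENOENT:                   // negative entry
 *      default:                        // -ETIMEDOUT etc.
 *              return SVC_DENIED;
 *      }
 *
 * Note that on any non-zero return cache_check has already dropped the
 * caller's reference to the item.
 */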

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static const struct file_operations cache_file_operations;
static const struct file_operations content_file_operations;
static const struct file_operations cache_flush_operations;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

static void remove_cache_proc_entries(struct cache_detail *cd)
{
        if (cd->proc_ent == NULL)
                return;
        if (cd->flush_ent)
                remove_proc_entry("flush", cd->proc_ent);
        if (cd->channel_ent)
                remove_proc_entry("channel", cd->proc_ent);
        if (cd->content_ent)
                remove_proc_entry("content", cd->proc_ent);
        cd->proc_ent = NULL;
        remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
        struct proc_dir_entry *p;

        cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
        if (cd->proc_ent == NULL)
                goto out_nomem;
        cd->channel_ent = cd->content_ent = NULL;

        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
                             cd->proc_ent, &cache_flush_operations, cd);
        cd->flush_ent = p;
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->proc_ent, &cache_file_operations, cd);
                cd->channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
                                cd->proc_ent, &content_file_operations, cd);
                cd->content_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
        return 0;
}
#endif

int cache_register(struct cache_detail *cd)
{
        int ret;

        ret = create_cache_proc_entries(cd);
        if (ret)
                return ret;
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
        return 0;
}
EXPORT_SYMBOL_GPL(cache_register);
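
/*
 * Example (illustrative sketch): a backend registers a mostly-static
 * cache_detail at module init; all 'mycache' names below are hypothetical.
 * cache_register creates /proc/net/rpc/<name>/ with flush, channel and
 * content files as appropriate:
 *
 *      static struct cache_detail mycache = {
 *              .owner          = THIS_MODULE,
 *              .hash_size      = MYCACHE_HASHSIZE,
 *              .hash_table     = mycache_table,
 *              .name           = "mycache",
 *              .cache_request  = mycache_request,
 *              .cache_parse    = mycache_parse,
 *              .cache_show     = mycache_show,
 *              .match          = mycache_match,
 *              .init           = mycache_init,
 *              .update         = mycache_update,
 *              .alloc          = mycache_alloc,
 *      };
 *
 *      err = cache_register(&mycache);         // at module init
 *      ...
 *      cache_unregister(&mycache);             // at module exit
 */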

void cache_unregister(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        remove_cache_proc_entries(cd);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(cache_unregister);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > get_seconds())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = get_seconds()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               current_detail->hash_table[current_index] == NULL)
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = &current_detail->hash_table[current_index];
                ch = *cp;
                for (; ch; cp = &ch->next, ch = *cp) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (ch->expiry_time >= get_seconds() &&
                            ch->last_refresh >= current_detail->flush_time)
                                continue;
                        if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
                                cache_dequeue(current_detail, ch);

                        if (atomic_read(&ch->ref.refcount) == 1)
                                break;
                }
                if (ch) {
                        *cp = ch->next;
                        ch->next = NULL;
                        current_detail->entries--;
                        rv = 1;
                }
                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        cache_revisit_request(ch);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = get_seconds();
        cache_flush();
        detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
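
/*
 * Example (illustrative sketch): a transport supplies the ->defer method
 * on its cache_req.  It saves enough of the request to replay it later
 * and embeds a cache_deferred_req whose ->revisit either re-queues or
 * drops the request.  All 'my_*' names are hypothetical:
 *
 *      static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *      {
 *              struct my_deferred *md =
 *                      container_of(dreq, struct my_deferred, handle);
 *
 *              if (too_many)
 *                      my_drop_request(md);    // hash table overflowed
 *              else
 *                      my_requeue_request(md); // cache item now usable
 *      }
 *
 *      static struct cache_deferred_req *my_defer(struct cache_req *req)
 *      {
 *              struct my_deferred *md = my_save_request(req);
 *
 *              if (md == NULL)
 *                      return NULL;    // caller takes the -ETIMEDOUT path
 *              md->handle.revisit = my_revisit;
 *              return &md->handle;
 *      }
 */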

#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        int hash = DFR_HASH(item);

        if (cache_defer_cnt >= DFR_MAX) {
                /* too much in the cache, randomly drop this one,
                 * or continue and drop the oldest below
                 */
                if (net_random()&1)
                        return 0;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return 0;

        dreq->item = item;

        spin_lock(&cache_defer_lock);

        list_add(&dreq->recent, &cache_defer_list);

        if (cache_defer_hash[hash].next == NULL)
                INIT_LIST_HEAD(&cache_defer_hash[hash]);
        list_add(&dreq->hash, &cache_defer_hash[hash]);

        /* it is in, now maybe clean up */
        dreq = NULL;
        if (++cache_defer_cnt > DFR_MAX) {
                dreq = list_entry(cache_defer_list.prev,
                                  struct cache_deferred_req, recent);
                list_del(&dreq->recent);
                list_del(&dreq->hash);
                cache_defer_cnt--;
        }
        spin_unlock(&cache_defer_lock);

        if (dreq) {
                /* there was one too many */
                dreq->revisit(dreq, 1);
        }
        if (!test_bit(CACHE_PENDING, &item->flags)) {
                /* must have just been validated... */
                cache_revisit_request(item);
                return 0;
        }
        return 1;
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;

        struct list_head *lp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        lp = cache_defer_hash[hash].next;
        if (lp) {
                while (lp != &cache_defer_hash[hash]) {
                        dreq = list_entry(lp, struct cache_deferred_req, hash);
                        lp = lp->next;
                        if (dreq->item == item) {
                                list_del(&dreq->hash);
                                list_move(&dreq->recent, &pending);
                                cache_defer_cnt--;
                        }
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;


        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        list_del(&dreq->hash);
                        list_move(&dreq->recent, &pending);
                        cache_defer_cnt--;
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
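
/*
 * Example (illustrative sketch): the matching user-space daemon opens the
 * channel file read-write, takes one whole upcall per read() and answers
 * with one whole write().  A skeleton with error handling elided; the
 * cache name and reply layout depend on the particular cache:
 *
 *      int fd = open("/proc/net/rpc/mycache/channel", O_RDWR);
 *      char buf[8192];
 *      ssize_t n;
 *
 *      while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *              // buf now holds one complete, newline-terminated request;
 *              // compute a reply line "key... expiry content...\n" and
 *              // push it back down the same channel:
 *              write(fd, reply, replylen);
 *      }
 */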

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
                                      * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&queue_io_mutex);
                BUG_ON(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        BUG_ON(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&queue_io_mutex);
        return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_mutex */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
            loff_t *ppos)
{
        int err;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        if (count == 0)
                return 0;
        if (count >= sizeof(write_buf))
                return -EINVAL;

        mutex_lock(&queue_io_mutex);

        if (copy_from_user(write_buf, buf, count)) {
                mutex_unlock(&queue_io_mutex);
                return -EFAULT;
        }
        write_buf[count] = '\0';
        if (cd->cache_parse)
                err = cd->cache_parse(cd, write_buf, count);
        else
                err = -EINVAL;

        mutex_unlock(&queue_io_mutex);
        return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        poll_wait(filp, &queue_wait, wait);
        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
            unsigned int cmd, unsigned long arg)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;
        struct cache_detail *cd = PDE(ino)->data;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
        struct cache_reader *rp = NULL;

        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                struct cache_detail *cd = PDE(inode)->data;

                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp)
                        return -ENOMEM;
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_detail *cd = PDE(inode)->data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = get_seconds();
                atomic_dec(&cd->readers);
        }
        return 0;
}



static const struct file_operations cache_file_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read,
        .write          = cache_write,
        .poll           = cache_poll,
        .ioctl          = cache_ioctl, /* for FIONREAD */
        .open           = cache_open,
        .release        = cache_release,
};


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq;
        spin_lock(&queue_lock);
        list_for_each_entry(cq, &detail->queue, list)
                if (!cq->reader) {
                        struct cache_request *cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (cr->readers != 0)
                                continue;
                        list_del(&cr->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(cr->item, detail);
                        kfree(cr->buf);
                        kfree(cr);
                        return;
                }
        spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * ('slosh') with \nnn octal escapes, or hexified with a leading \x.
 * Each record is terminated with a newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        char c;

        if (len < 0) return;

        while ((c=*str++) && len)
                switch(c) {
                case ' ':
                case '\t':
                case '\n':
                case '\\':
                        if (len >= 4) {
                                *bp++ = '\\';
                                *bp++ = '0' + ((c & 0300)>>6);
                                *bp++ = '0' + ((c & 0070)>>3);
                                *bp++ = '0' + ((c & 0007)>>0);
                        }
                        len -= 4;
                        break;
                default:
                        *bp++ = c;
                        len--;
                }
        if (c || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        unsigned char c = *buf++;
                        *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
                        *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
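
/*
 * Example (illustrative): building an upcall line with the two helpers
 * above, given 'bp' pointing into a PAGE_SIZE buffer and 'len' holding
 * the space remaining:
 *
 *      qword_add(&bp, &len, "nfsd 4.1");       // emits "nfsd\0404.1 "
 *                                              // (the space quoted as \040)
 *      qword_addhex(&bp, &len, "\x01\x02", 2); // emits "\x0102 "
 *      *bp++ = '\n'; len--;                    // terminate the record
 *
 * On overflow both helpers leave *lp negative, which cache_make_upcall
 * below treats as failure.
 */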

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail);
        }
}

/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{

        char *buf;
        struct cache_request *crq;
        char *bp;
        int len;

        if (detail->cache_request == NULL)
                return -EINVAL;

        if (atomic_read(&detail->readers) == 0 &&
            detail->last_close < get_seconds() - 30) {
                warn_no_listener(detail);
                return -EINVAL;
        }

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        bp = buf; len = PAGE_SIZE;

        detail->cache_request(detail, h, &bp, &len);

        if (len < 0) {
                kfree(buf);
                kfree(crq);
                return -EAGAIN;
        }
        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = PAGE_SIZE - len;
        crq->readers = 0;
        spin_lock(&queue_lock);
        list_add_tail(&crq->q.list, &detail->queue);
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        return 0;
}

/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
                        int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        bp++;
                        byte <<= 4;
                        byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
                        *dest++ = byte;
                        bp++;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp -'0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);
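
/*
 * Example (illustrative): parsing the line produced in the qword_add
 * example above.  For input "nfsd\0404.1 \x0102\n":
 *
 *      char *mesg = line;
 *      char buf[128];
 *      int n;
 *
 *      n = qword_get(&mesg, buf, sizeof(buf)); // n == 8, buf == "nfsd 4.1"
 *      n = qword_get(&mesg, buf, sizeof(buf)); // n == 2, buf == {0x01, 0x02}
 *      n = qword_get(&mesg, buf, sizeof(buf)); // n == 0, end of record
 */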

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
        struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;


        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        for (ch = cd->hash_table[hash]; ch; ch = ch->next)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while (hash < cd->hash_size &&
                 cd->hash_table[hash] == NULL);
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return ch->next;
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               cd->hash_table[hash] == NULL) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = ((struct handle *)m->private)->cd;
        read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else
                cache_put(cp, cd);

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
        struct handle *han;
        struct cache_detail *cd = PDE(inode)->data;

        han = __seq_open_private(file, &cache_content_op, sizeof(*han));
        if (han == NULL)
                return -ENOMEM;

        han->cd = cd;
        return 0;
}

static const struct file_operations content_file_operations = {
        .open           = content_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
        char tbuf[20];
        unsigned long p = *ppos;
        size_t len;

        sprintf(tbuf, "%lu\n", cd->flush_time);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
        char tbuf[20];
        char *ep;
        long flushtime;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        flushtime = simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        cd->flush_time = flushtime;
        cd->nextcheck = get_seconds();
        cache_flush();

        *ppos += count;
        return count;
}

static const struct file_operations cache_flush_operations = {
        .open           = nonseekable_open,
        .read           = read_flush,
        .write          = write_flush,
};
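
/*
 * Example (illustrative): flushing a cache from user space.  Writing a
 * time in seconds-since-epoch to the flush file invalidates everything
 * last refreshed before that time, so writing the current time discards
 * the whole cache ("mycache" is hypothetical):
 *
 *      int fd = open("/proc/net/rpc/mycache/flush", O_WRONLY);
 *      char now[20];
 *
 *      snprintf(now, sizeof(now), "%ld\n", (long)time(NULL));
 *      write(fd, now, strlen(now));
 *      close(fd);
 */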