/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
        time_t now = seconds_since_boot();
        h->next = NULL;
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}

static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
        return (h->expiry_time < seconds_since_boot()) ||
               (detail->flush_time > h->last_refresh);
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head **head, **hp;
        struct cache_head *new = NULL, *freeme = NULL;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp))
                                /* This entry is expired, we will discard it. */
                                break;
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp)) {
                                *hp = tmp->next;
                                tmp->next = NULL;
                                detail->entries--;
                                freeme = tmp;
                                break;
                        }
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }
        new->next = *head;
        *head = new;
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        if (freeme)
                cache_put(freeme, detail);
        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
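
/*
 * Illustrative sketch (not part of this file): cache implementations
 * normally wrap sunrpc_cache_lookup() in a type-safe helper.  The names
 * below (struct ip_map, ip_map_cache, ip_map_hash) are hypothetical
 * stand-ins for a real cache's types.
 */
#if 0
static struct ip_map *ip_map_lookup(struct ip_map *item)
{
        struct cache_head *ch;

        ch = sunrpc_cache_lookup(&ip_map_cache, &item->h,
                                 ip_map_hash(item));
        if (!ch)
                return NULL;    /* allocation failed */
        /* We now hold a reference; drop it later with cache_put(). */
        return container_of(ch, struct ip_map, h);
}
#endif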

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = seconds_since_boot();
        set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it.
         */
        struct cache_head **head;
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);
        head = &detail->hash_table[hash];

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        tmp->next = *head;
        *head = tmp;
        detail->entries++;
        cache_get(tmp);
        cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
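
/*
 * Illustrative sketch (not part of this file): a downcall parser
 * typically fills a temporary entry and commits it with
 * sunrpc_cache_update(), which either refreshes 'old' in place or
 * replaces it.  All names here are hypothetical.
 */
#if 0
static int ip_map_commit(struct ip_map *old, struct ip_map *updated)
{
        struct cache_head *ch;

        ch = sunrpc_cache_update(&ip_map_cache, &updated->h, &old->h,
                                 ip_map_hash(updated));
        if (!ch)
                return -ENOMEM; /* 'old' has already been cache_put() */
        cache_put(ch, &ip_map_cache);
        return 0;
}
#endif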

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
        if (!cd->cache_upcall)
                return -EINVAL;
        return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else
                        return 0;
        }
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(detail, h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN || age > refresh_age/2) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                if (rv == -EAGAIN) {
                                        set_bit(CACHE_NEGATIVE, &h->flags);
                                        cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
                                        cache_fresh_unlocked(h, detail);
                                        rv = -ENOENT;
                                }
                                break;

                        case -EAGAIN:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
                        rv = cache_is_valid(detail, h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
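
/*
 * Illustrative sketch (not part of this file): a typical server-side
 * caller holds a reference, calls cache_check() with the request's
 * cache_req handle, and maps the result onto its own status codes.
 * svc_check_access() is hypothetical; the SVC_* values mirror the
 * svcauth conventions.
 */
#if 0
static int svc_check_access(struct cache_detail *cd, struct cache_head *h,
                            struct cache_req *req)
{
        switch (cache_check(cd, h, req)) {
        case 0:
                return SVC_OK;          /* entry valid, reference kept */
        case -EAGAIN:
                return SVC_DROP;        /* deferred; request will be revisited */
        case -ENOENT:
                return SVC_DENIED;      /* negative entry */
        default:
                return SVC_DROP;        /* -ETIMEDOUT etc.; reference dropped */
        }
}
#endif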

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
}

static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               current_detail->hash_table[current_index] == NULL)
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = &current_detail->hash_table[current_index];
                for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        *cp = ch->next;
                        ch->next = NULL;
                        current_detail->entries--;
                        rv = 1;
                        break;
                }

                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
                                cache_dequeue(current_detail, ch);
                        cache_revisit_request(ch);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = seconds_since_boot();
        cache_flush();
        detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
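
/*
 * Illustrative sketch (not part of this file): a request provider
 * implements ->defer to hand back an embedded cache_deferred_req, and
 * supplies a ->revisit method that requeues the original request.
 * struct my_req, my_requeue() and my_drop() are hypothetical.
 */
#if 0
struct my_req {
        struct cache_req        h;      /* supplies ->defer and ->thread_wait */
        struct cache_deferred_req deferred;
};

static void my_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct my_req *rq = container_of(dreq, struct my_req, deferred);

        if (too_many)
                my_drop(rq);    /* deferral limit hit: drop the request */
        else
                my_requeue(rq); /* cache info arrived: process it again */
}

static struct cache_deferred_req *my_defer(struct cache_req *req)
{
        struct my_req *rq = container_of(req, struct my_req, h);

        rq->deferred.revisit = my_revisit;
        return &rq->deferred;
}
#endif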

#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        hlist_del_init(&dreq->hash);
        if (!list_empty(&dreq->recent)) {
                list_del_init(&dreq->recent);
                cache_defer_cnt--;
        }
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&dreq->recent);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
                           struct cache_head *item,
                           int count_me)
{
        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        if (count_me) {
                cache_defer_cnt++;
                list_add(&dreq->recent, &cache_defer_list);
        }

        spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        setup_deferral(dreq, item, 0);

        if (!test_bit(CACHE_PENDING, &item->flags) ||
            wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
}

static void cache_limit_defers(void)
{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
        struct cache_deferred_req *discard = NULL;

        if (cache_defer_cnt <= DFR_MAX)
                return;

        spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
                if (net_random() & 1)
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);
        if (discard)
                discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;

        if (req->thread_wait) {
                cache_wait_req(req, item);
                if (!test_bit(CACHE_PENDING, &item->flags))
                        return false;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return false;
        setup_deferral(dreq, item, 1);
        if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
                cache_revisit_request(item);

        cache_limit_defers();
        return true;
}

static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;
        struct hlist_node *lp, *tmp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
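
/*
 * Illustrative sketch (not part of this file): the userspace side of
 * the channel is a daemon that blocks in read() for a request line,
 * resolves it, and write()s a reply.  The path shown and
 * resolve_request() are hypothetical; rpc.mountd and svcgssd follow
 * this pattern.
 */
#if 0
/* userspace daemon, built separately against libc */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

extern int resolve_request(const char *req, ssize_t rlen,
                           char *reply, size_t rsize);

int channel_loop(void)
{
        char buf[8192];
        int fd = open("/proc/net/rpc/somecache/channel", O_RDWR);
        ssize_t n;

        if (fd < 0)
                return -1;
        while ((n = read(fd, buf, sizeof(buf))) > 0) {
                char reply[8192];
                /* parse the request in buf[0..n), build one reply line */
                int rlen = resolve_request(buf, n, reply, sizeof(reply));

                if (rlen > 0 && write(fd, reply, rlen) != rlen)
                        perror("downcall");
        }
        close(fd);
        return 0;
}
#endif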

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = filp->f_path.dentry->d_inode;
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
                                      * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&inode->i_mutex);
                BUG_ON(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        BUG_ON(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&inode->i_mutex);
        return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
                                   size_t count, struct cache_detail *cd)
{
        static char write_buf[8192]; /* protected by queue_io_mutex */
        ssize_t ret = -EINVAL;

        if (count >= sizeof(write_buf))
                goto out;
        mutex_lock(&queue_io_mutex);
        ret = cache_do_downcall(write_buf, buf, count, cd);
        mutex_unlock(&queue_io_mutex);
out:
        return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        struct page *page;
        char *kaddr;
        ssize_t ret = -ENOMEM;

        if (count >= PAGE_CACHE_SIZE)
                goto out_slow;

        page = find_or_create_page(mapping, 0, GFP_KERNEL);
        if (!page)
                goto out_slow;

        kaddr = kmap(page);
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
        page_cache_release(page);
        return ret;
out_slow:
        return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = filp->f_path.dentry->d_inode;
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        mutex_lock(&inode->i_mutex);
        ret = cache_downcall(mapping, buf, count, cd);
        mutex_unlock(&inode->i_mutex);
out:
        return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write (POLLOUT is the poll mask bit;
         * POLL_OUT is an unrelated SIGPOLL code) */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp) {
                        /* drop the module reference taken above */
                        module_put(cd->owner);
                        return -ENOMEM;
                }
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = seconds_since_boot();
                atomic_dec(&cd->readers);
        }
        module_put(cd->owner);
        return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq;

        spin_lock(&queue_lock);
        list_for_each_entry(cq, &detail->queue, list)
                if (!cq->reader) {
                        struct cache_request *cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (cr->readers != 0)
                                continue;
                        list_del(&cr->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(cr->item, detail);
                        kfree(cr->buf);
                        kfree(cr);
                        return;
                }
        spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */
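
/*
 * Illustrative sketch (not part of this file): building one upcall
 * record with qword_add()/qword_addhex().  The buffer and the field
 * values shown are hypothetical.
 */
#if 0
static void format_example(char *buf, int buflen)
{
        char *bp = buf;
        int len = buflen;
        char class[] = "nfsd";
        unsigned char addr[4] = { 192, 0, 2, 1 };

        qword_add(&bp, &len, class);              /* plain quoted field */
        qword_addhex(&bp, &len, (char *)addr, 4); /* emits "\xc0000201 " */
        if (len >= 1)
                *bp++ = '\n';                     /* terminate the record */
        else
                len = -1;                         /* buffer overflowed */
}
#endif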

void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        char c;

        if (len < 0) return;

        while ((c = *str++) && len)
                switch (c) {
                case ' ':
                case '\t':
                case '\n':
                case '\\':
                        if (len >= 4) {
                                *bp++ = '\\';
                                *bp++ = '0' + ((c & 0300)>>6);
                                *bp++ = '0' + ((c & 0070)>>3);
                                *bp++ = '0' + ((c & 0007)>>0);
                        }
                        len -= 4;
                        break;
                default:
                        *bp++ = c;
                        len--;
                }
        if (c || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        unsigned char c = *buf++;
                        *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
                        *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->readers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
                void (*cache_request)(struct cache_detail *,
                                      struct cache_head *,
                                      char **,
                                      int *))
{
        char *buf;
        struct cache_request *crq;
        char *bp;
        int len;

        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                return -EINVAL;
        }

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        bp = buf; len = PAGE_SIZE;

        cache_request(detail, h, &bp, &len);

        if (len < 0) {
                kfree(buf);
                kfree(crq);
                return -EAGAIN;
        }
        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = PAGE_SIZE - len;
        crq->readers = 0;
        spin_lock(&queue_lock);
        list_add_tail(&crq->q.list, &detail->queue);
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
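
/*
 * Illustrative sketch (not part of this file): the cache_request
 * formatter handed to sunrpc_cache_pipe_upcall() emits the key fields
 * followed by a newline, shrinking *blen as it goes.  struct ip_map
 * and its fields are hypothetical.
 */
#if 0
static void example_request(struct cache_detail *cd, struct cache_head *h,
                            char **bpp, int *blen)
{
        struct ip_map *im = container_of(h, struct ip_map, h);

        qword_add(bpp, blen, im->m_class);
        qword_addhex(bpp, blen, (char *)&im->m_addr, 4);
        (*bpp)[-1] = '\n';      /* replace trailing space with newline */
}
#endif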

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
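
/*
 * Illustrative sketch (not part of this file): a cache_parse
 * implementation pulls fields off the downcall line with qword_get().
 * The field layout shown (a name, then an expiry) is hypothetical.
 */
#if 0
static int example_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char name[64];
        time_t expiry;

        if (qword_get(&mesg, name, sizeof(name)) <= 0)
                return -EINVAL;         /* first field: a quoted name */
        expiry = get_expiry(&mesg);     /* second field: expiry seconds */
        if (expiry == 0)
                return -EINVAL;
        /* ... look up the entry and sunrpc_cache_update() it ... */
        return 0;
}
#endif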

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (len < bufsize) {
                        int h, l;

                        h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;

                        l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;

                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp - '0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
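
/*
 * Illustrative sketch (not part of this file): a minimal ->cache_show
 * method matching the convention above; it emits a header line for the
 * NULL item and one line per real entry.  struct ip_map and its fields
 * are hypothetical.
 */
#if 0
static int example_show(struct seq_file *m, struct cache_detail *cd,
                        struct cache_head *h)
{
        struct ip_map *im;

        if (h == NULL) {
                seq_puts(m, "#class IP-address domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);
        seq_printf(m, "%s %pI4 %s\n", im->m_class, &im->m_addr,
                   im->m_domain);
        return 0;
}
#endif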

struct handle {
        struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        for (ch = cd->hash_table[hash]; ch; ch = ch->next)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while (hash < cd->hash_size &&
                 cd->hash_table[hash] == NULL);
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return ch->next;
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               cd->hash_table[hash] == NULL) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = ((struct handle *)m->private)->cd;
        read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else
                cache_put(cp, cd);

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show,
};

static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct handle *han;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        han = __seq_open_private(file, &cache_content_op, sizeof(*han));
        if (han == NULL) {
                module_put(cd->owner);
                return -ENOMEM;
        }

        han->cd = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                struct cache_detail *cd)
{
        int ret = seq_release_private(inode, file);
        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[20];
        unsigned long p = *ppos;
        size_t len;

        sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf + p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *bp, *ep;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        bp = tbuf;
        cd->flush_time = get_expiry(&bp);
        cd->nextcheck = seconds_since_boot();
        cache_flush();

        *ppos += count;
        return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct cache_detail *cd = PDE(inode)->data;

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_procfs,
        .write          = cache_write_procfs,
        .poll           = cache_poll_procfs,
        .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
        .open           = cache_open_procfs,
        .release        = cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
        .open           = content_open_procfs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE(inode)->data;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

        return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
        .open           = open_flush_procfs,
        .read           = read_flush_procfs,
        .write          = write_flush_procfs,
        .release        = release_flush_procfs,
        .llseek         = no_llseek,
};

1544 static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
1545 {
1546         struct sunrpc_net *sn;
1547
1548         if (cd->u.procfs.proc_ent == NULL)
1549                 return;
1550         if (cd->u.procfs.flush_ent)
1551                 remove_proc_entry("flush", cd->u.procfs.proc_ent);
1552         if (cd->u.procfs.channel_ent)
1553                 remove_proc_entry("channel", cd->u.procfs.proc_ent);
1554         if (cd->u.procfs.content_ent)
1555                 remove_proc_entry("content", cd->u.procfs.proc_ent);
1556         cd->u.procfs.proc_ent = NULL;
1557         sn = net_generic(net, sunrpc_net_id);
1558         remove_proc_entry(cd->name, sn->proc_net_rpc);
1559 }
1560
1561 #ifdef CONFIG_PROC_FS
1562 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1563 {
1564         struct proc_dir_entry *p;
1565         struct sunrpc_net *sn;
1566
1567         sn = net_generic(net, sunrpc_net_id);
1568         cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
1569         if (cd->u.procfs.proc_ent == NULL)
1570                 goto out_nomem;
1571         cd->u.procfs.channel_ent = NULL;
1572         cd->u.procfs.content_ent = NULL;
1573
1574         p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1575                              cd->u.procfs.proc_ent,
1576                              &cache_flush_operations_procfs, cd);
1577         cd->u.procfs.flush_ent = p;
1578         if (p == NULL)
1579                 goto out_nomem;
1580
1581         if (cd->cache_upcall || cd->cache_parse) {
1582                 p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1583                                      cd->u.procfs.proc_ent,
1584                                      &cache_file_operations_procfs, cd);
1585                 cd->u.procfs.channel_ent = p;
1586                 if (p == NULL)
1587                         goto out_nomem;
1588         }
1589         if (cd->cache_show) {
1590                 p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
1591                                 cd->u.procfs.proc_ent,
1592                                 &content_file_operations_procfs, cd);
1593                 cd->u.procfs.content_ent = p;
1594                 if (p == NULL)
1595                         goto out_nomem;
1596         }
1597         return 0;
1598 out_nomem:
1599         remove_cache_proc_entries(cd, net);
1600         return -ENOMEM;
1601 }
1602 #else /* CONFIG_PROC_FS */
1603 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1604 {
1605         return 0;
1606 }
1607 #endif
1608
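/*
 * Prepare the deferrable work item that periodically prunes expired
 * entries.  It is first scheduled when a cache_detail is registered
 * and cancelled again once the last cache is destroyed.
 */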
1609 void __init cache_initialize(void)
1610 {
1611         INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
1612 }
1613
1614 int cache_register_net(struct cache_detail *cd, struct net *net)
1615 {
1616         int ret;
1617
1618         sunrpc_init_cache_detail(cd);
1619         ret = create_cache_proc_entries(cd, net);
1620         if (ret)
1621                 sunrpc_destroy_cache_detail(cd);
1622         return ret;
1623 }
1624
1625 int cache_register(struct cache_detail *cd)
1626 {
1627         return cache_register_net(cd, &init_net);
1628 }
1629 EXPORT_SYMBOL_GPL(cache_register);
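/*
 * A minimal, hypothetical registration sketch; the values below are
 * for illustration only, and a real cache also supplies match/init/
 * update/alloc methods plus an upcall/parse pair for its channel:
 *
 *	static struct cache_detail my_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= MY_HASH_SIZE,
 *		.hash_table	= my_hash_table,
 *		.name		= "my_cache",
 *		.cache_show	= my_cache_show,
 *	};
 *
 *	err = cache_register(&my_cache);   creates /proc/net/rpc/my_cache
 *	...
 *	cache_unregister(&my_cache);
 */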
1630
1631 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1632 {
1633         remove_cache_proc_entries(cd, net);
1634         sunrpc_destroy_cache_detail(cd);
1635 }
1636
1637 void cache_unregister(struct cache_detail *cd)
1638 {
1639         cache_unregister_net(cd, &init_net);
1640 }
1641 EXPORT_SYMBOL_GPL(cache_unregister);
1642
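/*
 * The wrappers below mirror the procfs ones above but serve caches
 * whose files live in rpc_pipefs; there the cache_detail hangs off
 * the private field of the rpc_inode instead of a proc_dir_entry.
 */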
1643 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1644                                  size_t count, loff_t *ppos)
1645 {
1646         struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1647
1648         return cache_read(filp, buf, count, ppos, cd);
1649 }
1650
1651 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1652                                   size_t count, loff_t *ppos)
1653 {
1654         struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1655
1656         return cache_write(filp, buf, count, ppos, cd);
1657 }
1658
1659 static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1660 {
1661         struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1662
1663         return cache_poll(filp, wait, cd);
1664 }
1665
1666 static long cache_ioctl_pipefs(struct file *filp,
1667                               unsigned int cmd, unsigned long arg)
1668 {
1669         struct inode *inode = filp->f_path.dentry->d_inode;
1670         struct cache_detail *cd = RPC_I(inode)->private;
1671
1672         return cache_ioctl(inode, filp, cmd, arg, cd);
1673 }
1674
1675 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1676 {
1677         struct cache_detail *cd = RPC_I(inode)->private;
1678
1679         return cache_open(inode, filp, cd);
1680 }
1681
1682 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1683 {
1684         struct cache_detail *cd = RPC_I(inode)->private;
1685
1686         return cache_release(inode, filp, cd);
1687 }
1688
1689 const struct file_operations cache_file_operations_pipefs = {
1690         .owner          = THIS_MODULE,
1691         .llseek         = no_llseek,
1692         .read           = cache_read_pipefs,
1693         .write          = cache_write_pipefs,
1694         .poll           = cache_poll_pipefs,
1695         .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1696         .open           = cache_open_pipefs,
1697         .release        = cache_release_pipefs,
1698 };
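/*
 * A userspace daemon normally drives the channel with a poll/read/
 * write loop: FIONREAD reports how much of the next request is
 * pending, each read() returns (part of) one request, and each
 * write() submits one complete reply line.  A rough sketch, error
 * handling omitted and the buffer size purely illustrative:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[8192];
 *	int len;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (ioctl(fd, FIONREAD, &len) < 0 || len == 0)
 *			continue;
 *		len = read(fd, buf, sizeof(buf));
 *		... parse the request, look up the answer ...
 *		write(fd, reply, replylen);
 *	}
 */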
1699
1700 static int content_open_pipefs(struct inode *inode, struct file *filp)
1701 {
1702         struct cache_detail *cd = RPC_I(inode)->private;
1703
1704         return content_open(inode, filp, cd);
1705 }
1706
1707 static int content_release_pipefs(struct inode *inode, struct file *filp)
1708 {
1709         struct cache_detail *cd = RPC_I(inode)->private;
1710
1711         return content_release(inode, filp, cd);
1712 }
1713
1714 const struct file_operations content_file_operations_pipefs = {
1715         .open           = content_open_pipefs,
1716         .read           = seq_read,
1717         .llseek         = seq_lseek,
1718         .release        = content_release_pipefs,
1719 };
1720
1721 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1722 {
1723         struct cache_detail *cd = RPC_I(inode)->private;
1724
1725         return open_flush(inode, filp, cd);
1726 }
1727
1728 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1729 {
1730         struct cache_detail *cd = RPC_I(inode)->private;
1731
1732         return release_flush(inode, filp, cd);
1733 }
1734
1735 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1736                             size_t count, loff_t *ppos)
1737 {
1738         struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1739
1740         return read_flush(filp, buf, count, ppos, cd);
1741 }
1742
1743 static ssize_t write_flush_pipefs(struct file *filp,
1744                                   const char __user *buf,
1745                                   size_t count, loff_t *ppos)
1746 {
1747         struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1748
1749         return write_flush(filp, buf, count, ppos, cd);
1750 }
1751
1752 const struct file_operations cache_flush_operations_pipefs = {
1753         .open           = open_flush_pipefs,
1754         .read           = read_flush_pipefs,
1755         .write          = write_flush_pipefs,
1756         .release        = release_flush_pipefs,
1757         .llseek         = no_llseek,
1758 };
1759
1760 int sunrpc_cache_register_pipefs(struct dentry *parent,
1761                                  const char *name, mode_t umode,
1762                                  struct cache_detail *cd)
1763 {
1764         struct qstr q;
1765         struct dentry *dir;
1766         int ret = 0;
1767
1768         sunrpc_init_cache_detail(cd);
1769         q.name = name;
1770         q.len = strlen(name);
1771         q.hash = full_name_hash(q.name, q.len);
1772         dir = rpc_create_cache_dir(parent, &q, umode, cd);
1773         if (!IS_ERR(dir)) {
1774                 cd->u.pipefs.dir = dir;
1775         } else {
1776                 sunrpc_destroy_cache_detail(cd);
1777                 ret = PTR_ERR(dir);
1778         }
1779         return ret;
1780 }
1781 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1782
1783 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1784 {
1785         rpc_remove_cache_dir(cd->u.pipefs.dir);
1786         cd->u.pipefs.dir = NULL;
1787         sunrpc_destroy_cache_detail(cd);
1788 }
1789 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
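/*
 * A hypothetical usage sketch: create a cache directory named
 * "my_cache" beneath an existing rpc_pipefs dentry, and tear it down
 * again on shutdown (parent_dentry and cd are assumed to exist):
 *
 *	err = sunrpc_cache_register_pipefs(parent_dentry, "my_cache",
 *					   S_IRUSR | S_IWUSR, cd);
 *	...
 *	sunrpc_cache_unregister_pipefs(cd);
 */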
1790