1 // SPDX-License-Identifier: GPL-2.0-only
5 * Generic code for various authentication-related caches
6 * used by sunrpc clients and servers.
8 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
11 #include <linux/types.h>
13 #include <linux/file.h>
14 #include <linux/slab.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/kmod.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/ctype.h>
21 #include <linux/string_helpers.h>
22 #include <linux/uaccess.h>
23 #include <linux/poll.h>
24 #include <linux/seq_file.h>
25 #include <linux/proc_fs.h>
26 #include <linux/net.h>
27 #include <linux/workqueue.h>
28 #include <linux/mutex.h>
29 #include <linux/pagemap.h>
30 #include <asm/ioctls.h>
31 #include <linux/sunrpc/types.h>
32 #include <linux/sunrpc/cache.h>
33 #include <linux/sunrpc/stats.h>
34 #include <linux/sunrpc/rpc_pipe_fs.h>
37 #define RPCDBG_FACILITY RPCDBG_CACHE
39 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
40 static void cache_revisit_request(struct cache_head *item);
41 static bool cache_listeners_exist(struct cache_detail *detail);
43 static void cache_init(struct cache_head *h, struct cache_detail *detail)
45 time64_t now = seconds_since_boot();
46 INIT_HLIST_NODE(&h->cache_list);
49 h->expiry_time = now + CACHE_NEW_EXPIRY;
50 if (now <= detail->flush_time)
51 /* ensure it isn't already expired */
52 now = detail->flush_time + 1;
53 h->last_refresh = now;
56 static void cache_fresh_unlocked(struct cache_head *head,
57 struct cache_detail *detail);
59 static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
60 struct cache_head *key,
63 struct hlist_head *head = &detail->hash_table[hash];
64 struct cache_head *tmp;
67 hlist_for_each_entry_rcu(tmp, head, cache_list) {
68 if (detail->match(tmp, key)) {
69 if (cache_is_expired(detail, tmp))
71 tmp = cache_get_rcu(tmp);
80 static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
81 struct cache_detail *cd)
83 /* Must be called under cd->hash_lock */
84 hlist_del_init_rcu(&ch->cache_list);
85 set_bit(CACHE_CLEANED, &ch->flags);
89 static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
90 struct cache_detail *cd)
92 cache_fresh_unlocked(ch, cd);
96 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
97 struct cache_head *key,
100 struct cache_head *new, *tmp, *freeme = NULL;
101 struct hlist_head *head = &detail->hash_table[hash];
103 new = detail->alloc();
106 /* must fully initialise 'new', else
107 * things might go wrong if we need to
110 cache_init(new, detail);
111 detail->init(new, key);
113 spin_lock(&detail->hash_lock);
115 /* check if entry appeared while we slept */
116 hlist_for_each_entry_rcu(tmp, head, cache_list) {
117 if (detail->match(tmp, key)) {
118 if (cache_is_expired(detail, tmp)) {
119 sunrpc_begin_cache_remove_entry(tmp, detail);
124 spin_unlock(&detail->hash_lock);
125 cache_put(new, detail);
130 hlist_add_head_rcu(&new->cache_list, head);
133 spin_unlock(&detail->hash_lock);
136 sunrpc_end_cache_remove_entry(freeme, detail);
140 struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
141 struct cache_head *key, int hash)
143 struct cache_head *ret;
145 ret = sunrpc_cache_find_rcu(detail, key, hash);
148 /* Didn't find anything, insert an empty entry */
149 return sunrpc_cache_add_entry(detail, key, hash);
151 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
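/*
 * Illustrative only (hypothetical "struct my_ent", not part of this file):
 * callers normally wrap sunrpc_cache_lookup_rcu() in a type-safe helper
 * that fills a temporary key entry (just the fields ->match() compares),
 * hashes it, and converts the result back with container_of():
 *
 *	static struct my_ent *my_ent_lookup(struct cache_detail *cd, ...)
 *	{
 *		struct my_ent key = { .field = ... };
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &key.h, my_ent_hash(&key));
 *		return ch ? container_of(ch, struct my_ent, h) : NULL;
 *	}
 *
 * The cache's ->alloc(), ->init() and ->match() methods do the rest.
 */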
153 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
155 static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
156 struct cache_detail *detail)
158 time64_t now = seconds_since_boot();
159 if (now <= detail->flush_time)
160 /* ensure it isn't immediately treated as expired */
161 now = detail->flush_time + 1;
162 head->expiry_time = expiry;
163 head->last_refresh = now;
164 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
165 set_bit(CACHE_VALID, &head->flags);
168 static void cache_fresh_unlocked(struct cache_head *head,
169 struct cache_detail *detail)
171 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
172 cache_revisit_request(head);
173 cache_dequeue(detail, head);
177 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
178 struct cache_head *new, struct cache_head *old, int hash)
180 /* The 'old' entry is to be replaced by 'new'.
181 * If 'old' is not VALID, we update it directly,
182 * otherwise we need to replace it
184 struct cache_head *tmp;
186 if (!test_bit(CACHE_VALID, &old->flags)) {
187 spin_lock(&detail->hash_lock);
188 if (!test_bit(CACHE_VALID, &old->flags)) {
189 if (test_bit(CACHE_NEGATIVE, &new->flags))
190 set_bit(CACHE_NEGATIVE, &old->flags);
192 detail->update(old, new);
193 cache_fresh_locked(old, new->expiry_time, detail);
194 spin_unlock(&detail->hash_lock);
195 cache_fresh_unlocked(old, detail);
198 spin_unlock(&detail->hash_lock);
200 /* We need to insert a new entry */
201 tmp = detail->alloc();
203 cache_put(old, detail);
206 cache_init(tmp, detail);
207 detail->init(tmp, old);
209 spin_lock(&detail->hash_lock);
210 if (test_bit(CACHE_NEGATIVE, &new->flags))
211 set_bit(CACHE_NEGATIVE, &tmp->flags);
213 detail->update(tmp, new);
214 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
217 cache_fresh_locked(tmp, new->expiry_time, detail);
218 cache_fresh_locked(old, 0, detail);
219 spin_unlock(&detail->hash_lock);
220 cache_fresh_unlocked(tmp, detail);
221 cache_fresh_unlocked(old, detail);
222 cache_put(old, detail);
225 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
227 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
229 if (cd->cache_upcall)
230 return cd->cache_upcall(cd, h);
231 return sunrpc_cache_pipe_upcall(cd, h);
234 static inline int cache_is_valid(struct cache_head *h)
236 if (!test_bit(CACHE_VALID, &h->flags))
240 if (test_bit(CACHE_NEGATIVE, &h->flags))
244 * In combination with write barrier in
245 * sunrpc_cache_update, ensures that anyone
246 * using the cache entry after this sees the
255 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
259 spin_lock(&detail->hash_lock);
260 rv = cache_is_valid(h);
262 set_bit(CACHE_NEGATIVE, &h->flags);
263 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
267 spin_unlock(&detail->hash_lock);
268 cache_fresh_unlocked(h, detail);
273 * This is the generic cache management routine for all
274 * the authentication caches.
275 * It checks the currency of a cache item and will (later)
276 * initiate an upcall to fill it if needed.
279 * Returns 0 if the cache_head can be used, or cache_puts it and returns
280 *   -EAGAIN if upcall is pending and request has been queued
281 *   -ETIMEDOUT if upcall failed or request could not be queued or
282 *              upcall completed but item is still invalid (implying that
283 *              the cache item has been replaced with a newer one).
284 *   -ENOENT if cache entry was negative
286 int cache_check(struct cache_detail *detail,
287 struct cache_head *h, struct cache_req *rqstp)
290 time64_t refresh_age, age;
292 /* First decide return status as best we can */
293 rv = cache_is_valid(h);
295 /* now see if we want to start an upcall */
296 refresh_age = (h->expiry_time - h->last_refresh);
297 age = seconds_since_boot() - h->last_refresh;
302 } else if (rv == -EAGAIN ||
303 (h->expiry_time != 0 && age > refresh_age/2)) {
304 dprintk("RPC: Want update, refage=%lld, age=%lld\n",
306 if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
307 switch (cache_make_upcall(detail, h)) {
309 rv = try_to_negate_entry(detail, h);
312 cache_fresh_unlocked(h, detail);
315 } else if (!cache_listeners_exist(detail))
316 rv = try_to_negate_entry(detail, h);
320 if (!cache_defer_req(rqstp, h)) {
322 * Request was not deferred; handle it as best
325 rv = cache_is_valid(h);
331 cache_put(h, detail);
334 EXPORT_SYMBOL_GPL(cache_check);
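/*
 * Illustrative only (hypothetical caller, not part of this file): server
 * code typically turns the cache_check() return values into RPC
 * dispositions, e.g. dropping the request while an upcall is pending and
 * denying access on a negative entry:
 *
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		break;
 *	case -EAGAIN:
 *		return SVC_DROP;
 *	case -ETIMEDOUT:
 *		return SVC_CLOSE;
 *	case -ENOENT:
 *		return SVC_DENIED;
 *	}
 */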
337 * caches need to be periodically cleaned.
338 * For this we maintain a list of cache_detail and
339 * a current pointer into that list and into the table
342 * Each time cache_clean is called it finds the next non-empty entry
343 * in the current table and walks the list in that entry
344 * looking for entries that can be removed.
346 * An entry gets removed if:
347 * - The expiry is before current time
348 * - The last_refresh time is before the flush_time for that cache
350 * later we might drop old entries with non-NEVER expiry if that table
351 * is getting 'full' for some definition of 'full'
353 * The question of "how often to scan a table" is an interesting one
354 * and is answered in part by the use of the "nextcheck" field in the
356 * When a scan of a table begins, the nextcheck field is set to a time
357 * that is well into the future.
358 * While scanning, if an expiry time is found that is earlier than the
359 * current nextcheck time, nextcheck is set to that expiry time.
360 * If the flush_time is ever set to a time earlier than the nextcheck
361 * time, the nextcheck time is then set to that flush_time.
363 * A table is then only scanned if the current time is at least
364 * the nextcheck time.
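 *
 * For example: a scan starting at t=1000 first sets nextcheck to t=2800
 * (30 minutes ahead); if the entries it walks expire at t=1100 and t=1500,
 * nextcheck is pulled down to 1101, so the table will not be rescanned
 * before t=1101 unless a flush moves flush_time (and hence nextcheck)
 * earlier.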
368 static LIST_HEAD(cache_list);
369 static DEFINE_SPINLOCK(cache_list_lock);
370 static struct cache_detail *current_detail;
371 static int current_index;
373 static void do_cache_clean(struct work_struct *work);
374 static struct delayed_work cache_cleaner;
376 void sunrpc_init_cache_detail(struct cache_detail *cd)
378 spin_lock_init(&cd->hash_lock);
379 INIT_LIST_HEAD(&cd->queue);
380 spin_lock(&cache_list_lock);
383 atomic_set(&cd->writers, 0);
386 list_add(&cd->others, &cache_list);
387 spin_unlock(&cache_list_lock);
389 /* start the cleaning process */
390 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
392 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
394 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
397 spin_lock(&cache_list_lock);
398 spin_lock(&cd->hash_lock);
399 if (current_detail == cd)
400 current_detail = NULL;
401 list_del_init(&cd->others);
402 spin_unlock(&cd->hash_lock);
403 spin_unlock(&cache_list_lock);
404 if (list_empty(&cache_list)) {
405 /* the module must be being unloaded, so it's safe to kill the worker */
406 cancel_delayed_work_sync(&cache_cleaner);
409 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
411 /* clean cache tries to find something to clean
413 * It returns 1 if it cleaned something,
414 * 0 if it didn't find anything this time
415 * -1 if it fell off the end of the list.
417 static int cache_clean(void)
420 struct list_head *next;
422 spin_lock(&cache_list_lock);
424 /* find a suitable table if we don't already have one */
425 while (current_detail == NULL ||
426 current_index >= current_detail->hash_size) {
428 next = current_detail->others.next;
430 next = cache_list.next;
431 if (next == &cache_list) {
432 current_detail = NULL;
433 spin_unlock(&cache_list_lock);
436 current_detail = list_entry(next, struct cache_detail, others);
437 if (current_detail->nextcheck > seconds_since_boot())
438 current_index = current_detail->hash_size;
441 current_detail->nextcheck = seconds_since_boot()+30*60;
445 /* find a non-empty bucket in the table */
446 while (current_detail &&
447 current_index < current_detail->hash_size &&
448 hlist_empty(¤t_detail->hash_table[current_index]))
451 /* find a cleanable entry in the bucket and clean it, or set to next bucket */
453 if (current_detail && current_index < current_detail->hash_size) {
454 struct cache_head *ch = NULL;
455 struct cache_detail *d;
456 struct hlist_head *head;
457 struct hlist_node *tmp;
459 spin_lock(¤t_detail->hash_lock);
461 /* Ok, now to clean this strand */
463 head = ¤t_detail->hash_table[current_index];
464 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
465 if (current_detail->nextcheck > ch->expiry_time)
466 current_detail->nextcheck = ch->expiry_time+1;
467 if (!cache_is_expired(current_detail, ch))
470 sunrpc_begin_cache_remove_entry(ch, current_detail);
475 spin_unlock(¤t_detail->hash_lock);
479 spin_unlock(&cache_list_lock);
481 sunrpc_end_cache_remove_entry(ch, d);
483 spin_unlock(&cache_list_lock);
489 * We want to regularly clean the cache, so we need to schedule some work ...
491 static void do_cache_clean(struct work_struct *work)
494 if (cache_clean() == -1)
495 delay = round_jiffies_relative(30*HZ);
497 if (list_empty(&cache_list))
501 queue_delayed_work(system_power_efficient_wq,
502 &cache_cleaner, delay);
507 * Clean all caches promptly. This just calls cache_clean
508 * repeatedly until we are sure that every cache has had a chance to
511 void cache_flush(void)
513 while (cache_clean() != -1)
515 while (cache_clean() != -1)
518 EXPORT_SYMBOL_GPL(cache_flush);
520 void cache_purge(struct cache_detail *detail)
522 struct cache_head *ch = NULL;
523 struct hlist_head *head = NULL;
524 struct hlist_node *tmp = NULL;
527 spin_lock(&detail->hash_lock);
528 if (!detail->entries) {
529 spin_unlock(&detail->hash_lock);
533 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
534 for (i = 0; i < detail->hash_size; i++) {
535 head = &detail->hash_table[i];
536 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
537 sunrpc_begin_cache_remove_entry(ch, detail);
538 spin_unlock(&detail->hash_lock);
539 sunrpc_end_cache_remove_entry(ch, detail);
540 spin_lock(&detail->hash_lock);
543 spin_unlock(&detail->hash_lock);
545 EXPORT_SYMBOL_GPL(cache_purge);
549 * Deferral and Revisiting of Requests.
551 * If a cache lookup finds a pending entry, we
552 * need to defer the request and revisit it later.
553 * All deferred requests are stored in a hash table,
554 * indexed by "struct cache_head *".
555 * As it may be wasteful to store a whole request
556 * structure, we allow the request to provide a
557 * deferred form, which must contain a
558 * 'struct cache_deferred_req'
559 * This cache_deferred_req contains a method to allow
560 * it to be revisited when cache info is available
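 *
 * In outline: a lookup that finds a CACHE_PENDING entry calls
 * cache_defer_req(), which either parks the calling thread for up to
 * req->thread_wait, or stores the request's deferred form hashed by the
 * cache_head.  Once the entry is resolved (or the upcall is abandoned),
 * cache_revisit_request() walks that hash chain and calls ->revisit()
 * on every deferred request for the item.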
563 #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
564 #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
566 #define DFR_MAX 300 /* ??? */
568 static DEFINE_SPINLOCK(cache_defer_lock);
569 static LIST_HEAD(cache_defer_list);
570 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
571 static int cache_defer_cnt;
573 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
575 hlist_del_init(&dreq->hash);
576 if (!list_empty(&dreq->recent)) {
577 list_del_init(&dreq->recent);
582 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
584 int hash = DFR_HASH(item);
586 INIT_LIST_HEAD(&dreq->recent);
587 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
590 static void setup_deferral(struct cache_deferred_req *dreq,
591 struct cache_head *item,
597 spin_lock(&cache_defer_lock);
599 __hash_deferred_req(dreq, item);
603 list_add(&dreq->recent, &cache_defer_list);
606 spin_unlock(&cache_defer_lock);
610 struct thread_deferred_req {
611 struct cache_deferred_req handle;
612 struct completion completion;
615 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
617 struct thread_deferred_req *dr =
618 container_of(dreq, struct thread_deferred_req, handle);
619 complete(&dr->completion);
622 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
624 struct thread_deferred_req sleeper;
625 struct cache_deferred_req *dreq = &sleeper.handle;
627 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
628 dreq->revisit = cache_restart_thread;
630 setup_deferral(dreq, item, 0);
632 if (!test_bit(CACHE_PENDING, &item->flags) ||
633 wait_for_completion_interruptible_timeout(
634 &sleeper.completion, req->thread_wait) <= 0) {
635 /* The completion wasn't completed, so we need
638 spin_lock(&cache_defer_lock);
639 if (!hlist_unhashed(&sleeper.handle.hash)) {
640 __unhash_deferred_req(&sleeper.handle);
641 spin_unlock(&cache_defer_lock);
643 /* cache_revisit_request already removed
644 * this from the hash table, but hasn't
645 * called ->revisit yet. It will very soon
646 * and we need to wait for it.
648 spin_unlock(&cache_defer_lock);
649 wait_for_completion(&sleeper.completion);
654 static void cache_limit_defers(void)
656 /* Make sure we haven't exceeded the limit of allowed deferred
659 struct cache_deferred_req *discard = NULL;
661 if (cache_defer_cnt <= DFR_MAX)
664 spin_lock(&cache_defer_lock);
666 /* Consider removing either the first or the last */
667 if (cache_defer_cnt > DFR_MAX) {
668 if (prandom_u32() & 1)
669 discard = list_entry(cache_defer_list.next,
670 struct cache_deferred_req, recent);
672 discard = list_entry(cache_defer_list.prev,
673 struct cache_deferred_req, recent);
674 __unhash_deferred_req(discard);
676 spin_unlock(&cache_defer_lock);
678 discard->revisit(discard, 1);
681 /* Return true if and only if a deferred request is queued. */
682 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
684 struct cache_deferred_req *dreq;
686 if (req->thread_wait) {
687 cache_wait_req(req, item);
688 if (!test_bit(CACHE_PENDING, &item->flags))
691 dreq = req->defer(req);
694 setup_deferral(dreq, item, 1);
695 if (!test_bit(CACHE_PENDING, &item->flags))
696 /* Bit could have been cleared before we managed to
697 * set up the deferral, so need to revisit just in case
699 cache_revisit_request(item);
701 cache_limit_defers();
705 static void cache_revisit_request(struct cache_head *item)
707 struct cache_deferred_req *dreq;
708 struct list_head pending;
709 struct hlist_node *tmp;
710 int hash = DFR_HASH(item);
712 INIT_LIST_HEAD(&pending);
713 spin_lock(&cache_defer_lock);
715 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
716 if (dreq->item == item) {
717 __unhash_deferred_req(dreq);
718 list_add(&dreq->recent, &pending);
721 spin_unlock(&cache_defer_lock);
723 while (!list_empty(&pending)) {
724 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
725 list_del_init(&dreq->recent);
726 dreq->revisit(dreq, 0);
730 void cache_clean_deferred(void *owner)
732 struct cache_deferred_req *dreq, *tmp;
733 struct list_head pending;
736 INIT_LIST_HEAD(&pending);
737 spin_lock(&cache_defer_lock);
739 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
740 if (dreq->owner == owner) {
741 __unhash_deferred_req(dreq);
742 list_add(&dreq->recent, &pending);
745 spin_unlock(&cache_defer_lock);
747 while (!list_empty(&pending)) {
748 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
749 list_del_init(&dreq->recent);
750 dreq->revisit(dreq, 1);
755 * communicate with user-space
757 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
758 * On read, you get a full request, or block.
759 * On write, an update request is processed.
760 * Poll works if anything to read, and always allows write.
762 * Implemented by a linked list of requests. Each open file has
763 * a ->private that also exists in this list. New requests are added
764 * to the end and may wake up any preceding readers.
765 * New readers are added to the head. If, on read, an item is found with
766 * CACHE_UPCALLING clear, we free it from the list.
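 *
 * For illustration (a hypothetical daemon, not part of this file), the
 * userspace side typically polls the channel, reads one complete request
 * at a time, and writes back one complete downcall record:
 *
 *	fd = open("/proc/net/rpc/<cachename>/channel", O_RDWR);
 *	for (;;) {
 *		poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *		if (read(fd, buf, sizeof(buf)) > 0) {
 *			reply_len = lookup_answer(buf, reply);
 *			write(fd, reply, reply_len);
 *		}
 *	}
 *
 * rpc.mountd, for example, services several of the nfsd caches this way.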
770 static DEFINE_SPINLOCK(queue_lock);
771 static DEFINE_MUTEX(queue_io_mutex);
774 struct list_head list;
775 int reader; /* if 0, then request */
777 struct cache_request {
778 struct cache_queue q;
779 struct cache_head *item;
784 struct cache_reader {
785 struct cache_queue q;
786 int offset; /* if non-0, we have a refcnt on next request */
789 static int cache_request(struct cache_detail *detail,
790 struct cache_request *crq)
795 detail->cache_request(detail, crq->item, &bp, &len);
798 return PAGE_SIZE - len;
801 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
802 loff_t *ppos, struct cache_detail *cd)
804 struct cache_reader *rp = filp->private_data;
805 struct cache_request *rq;
806 struct inode *inode = file_inode(filp);
812 inode_lock(inode); /* protect against multiple concurrent
813 * readers on this file */
815 spin_lock(&queue_lock);
816 /* need to find next request */
817 while (rp->q.list.next != &cd->queue &&
818 list_entry(rp->q.list.next, struct cache_queue, list)
820 struct list_head *next = rp->q.list.next;
821 list_move(&rp->q.list, next);
823 if (rp->q.list.next == &cd->queue) {
824 spin_unlock(&queue_lock);
826 WARN_ON_ONCE(rp->offset);
829 rq = container_of(rp->q.list.next, struct cache_request, q.list);
830 WARN_ON_ONCE(rq->q.reader);
833 spin_unlock(&queue_lock);
836 err = cache_request(cd, rq);
842 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
844 spin_lock(&queue_lock);
845 list_move(&rp->q.list, &rq->q.list);
846 spin_unlock(&queue_lock);
848 if (rp->offset + count > rq->len)
849 count = rq->len - rp->offset;
851 if (copy_to_user(buf, rq->buf + rp->offset, count))
854 if (rp->offset >= rq->len) {
856 spin_lock(&queue_lock);
857 list_move(&rp->q.list, &rq->q.list);
858 spin_unlock(&queue_lock);
863 if (rp->offset == 0) {
864 /* need to release rq */
865 spin_lock(&queue_lock);
867 if (rq->readers == 0 &&
868 !test_bit(CACHE_PENDING, &rq->item->flags)) {
869 list_del(&rq->q.list);
870 spin_unlock(&queue_lock);
871 cache_put(rq->item, cd);
875 spin_unlock(&queue_lock);
880 return err ? err : count;
883 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
884 size_t count, struct cache_detail *cd)
890 if (copy_from_user(kaddr, buf, count))
893 ret = cd->cache_parse(cd, kaddr, count);
899 static ssize_t cache_slow_downcall(const char __user *buf,
900 size_t count, struct cache_detail *cd)
902 static char write_buf[8192]; /* protected by queue_io_mutex */
903 ssize_t ret = -EINVAL;
905 if (count >= sizeof(write_buf))
907 mutex_lock(&queue_io_mutex);
908 ret = cache_do_downcall(write_buf, buf, count, cd);
909 mutex_unlock(&queue_io_mutex);
914 static ssize_t cache_downcall(struct address_space *mapping,
915 const char __user *buf,
916 size_t count, struct cache_detail *cd)
920 ssize_t ret = -ENOMEM;
922 if (count >= PAGE_SIZE)
925 page = find_or_create_page(mapping, 0, GFP_KERNEL);
930 ret = cache_do_downcall(kaddr, buf, count, cd);
936 return cache_slow_downcall(buf, count, cd);
939 static ssize_t cache_write(struct file *filp, const char __user *buf,
940 size_t count, loff_t *ppos,
941 struct cache_detail *cd)
943 struct address_space *mapping = filp->f_mapping;
944 struct inode *inode = file_inode(filp);
945 ssize_t ret = -EINVAL;
947 if (!cd->cache_parse)
951 ret = cache_downcall(mapping, buf, count, cd);
957 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
959 static __poll_t cache_poll(struct file *filp, poll_table *wait,
960 struct cache_detail *cd)
963 struct cache_reader *rp = filp->private_data;
964 struct cache_queue *cq;
966 poll_wait(filp, &queue_wait, wait);
968 /* always allow write */
969 mask = EPOLLOUT | EPOLLWRNORM;
974 spin_lock(&queue_lock);
976 for (cq= &rp->q; &cq->list != &cd->queue;
977 cq = list_entry(cq->list.next, struct cache_queue, list))
979 mask |= EPOLLIN | EPOLLRDNORM;
982 spin_unlock(&queue_lock);
986 static int cache_ioctl(struct inode *ino, struct file *filp,
987 unsigned int cmd, unsigned long arg,
988 struct cache_detail *cd)
991 struct cache_reader *rp = filp->private_data;
992 struct cache_queue *cq;
994 if (cmd != FIONREAD || !rp)
997 spin_lock(&queue_lock);
999 /* only find the length remaining in current request,
1000 * or the length of the next request
1002 for (cq= &rp->q; &cq->list != &cd->queue;
1003 cq = list_entry(cq->list.next, struct cache_queue, list))
1005 struct cache_request *cr =
1006 container_of(cq, struct cache_request, q);
1007 len = cr->len - rp->offset;
1010 spin_unlock(&queue_lock);
1012 return put_user(len, (int __user *)arg);
1015 static int cache_open(struct inode *inode, struct file *filp,
1016 struct cache_detail *cd)
1018 struct cache_reader *rp = NULL;
1020 if (!cd || !try_module_get(cd->owner))
1022 nonseekable_open(inode, filp);
1023 if (filp->f_mode & FMODE_READ) {
1024 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1026 module_put(cd->owner);
1032 spin_lock(&queue_lock);
1033 list_add(&rp->q.list, &cd->queue);
1034 spin_unlock(&queue_lock);
1036 if (filp->f_mode & FMODE_WRITE)
1037 atomic_inc(&cd->writers);
1038 filp->private_data = rp;
1042 static int cache_release(struct inode *inode, struct file *filp,
1043 struct cache_detail *cd)
1045 struct cache_reader *rp = filp->private_data;
1048 spin_lock(&queue_lock);
1050 struct cache_queue *cq;
1051 for (cq= &rp->q; &cq->list != &cd->queue;
1052 cq = list_entry(cq->list.next, struct cache_queue, list))
1054 container_of(cq, struct cache_request, q)
1060 list_del(&rp->q.list);
1061 spin_unlock(&queue_lock);
1063 filp->private_data = NULL;
1067 if (filp->f_mode & FMODE_WRITE) {
1068 atomic_dec(&cd->writers);
1069 cd->last_close = seconds_since_boot();
1071 module_put(cd->owner);
1077 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1079 struct cache_queue *cq, *tmp;
1080 struct cache_request *cr;
1081 struct list_head dequeued;
1083 INIT_LIST_HEAD(&dequeued);
1084 spin_lock(&queue_lock);
1085 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1087 cr = container_of(cq, struct cache_request, q);
1090 if (test_bit(CACHE_PENDING, &ch->flags))
1091 /* Lost a race and it is pending again */
1093 if (cr->readers != 0)
1095 list_move(&cr->q.list, &dequeued);
1097 spin_unlock(&queue_lock);
1098 while (!list_empty(&dequeued)) {
1099 cr = list_entry(dequeued.next, struct cache_request, q.list);
1100 list_del(&cr->q.list);
1101 cache_put(cr->item, detail);
1108 * Support routines for text-based upcalls.
1109 * Fields are separated by spaces.
1110 * Fields are either mangled, quoting space, tab, newline and backslash ('slosh') with backslash-octal escapes,
1111 * or hexified with a leading \x
1112 * Record is terminated with newline.
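 *
 * For example, qword_add(&bp, &len, "a b") emits "a\040b " (space, tab,
 * newline and backslash become \040, \011, \012 and \134), while
 * qword_addhex() would emit the same three bytes as "\x612062 "; each
 * field is followed by a space and the whole record by a single '\n'.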
1116 void qword_add(char **bpp, int *lp, char *str)
1122 if (len < 0) return;
1124 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1137 EXPORT_SYMBOL_GPL(qword_add);
1139 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1144 if (len < 0) return;
1150 while (blen && len >= 2) {
1151 bp = hex_byte_pack(bp, *buf++);
1156 if (blen || len<1) len = -1;
1164 EXPORT_SYMBOL_GPL(qword_addhex);
1166 static void warn_no_listener(struct cache_detail *detail)
1168 if (detail->last_warn != detail->last_close) {
1169 detail->last_warn = detail->last_close;
1170 if (detail->warn_no_listener)
1171 detail->warn_no_listener(detail, detail->last_close != 0);
1175 static bool cache_listeners_exist(struct cache_detail *detail)
1177 if (atomic_read(&detail->writers))
1179 if (detail->last_close == 0)
1180 /* This cache was never opened */
1182 if (detail->last_close < seconds_since_boot() - 30)
1184 * We allow for the possibility that someone might
1185 * restart a userspace daemon without restarting the
1186 * server; but after 30 seconds, we give up.
1193 * register an upcall request to user-space and queue it up for read() by the
1196 * Each request is at most one page long.
1198 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1202 struct cache_request *crq;
1205 if (!detail->cache_request)
1208 if (!cache_listeners_exist(detail)) {
1209 warn_no_listener(detail);
1212 if (test_bit(CACHE_CLEANED, &h->flags))
1213 /* Too late to make an upcall */
1216 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1220 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1230 spin_lock(&queue_lock);
1231 if (test_bit(CACHE_PENDING, &h->flags)) {
1232 crq->item = cache_get(h);
1233 list_add_tail(&crq->q.list, &detail->queue);
1235 /* Lost a race, no longer PENDING, so don't enqueue */
1237 spin_unlock(&queue_lock);
1238 wake_up(&queue_wait);
1239 if (ret == -EAGAIN) {
1245 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1248 * parse a message from user-space and pass it
1249 * to an appropriate cache
1250 * Messages are, like requests, separated into fields by
1251 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
1254 * reply cachename expiry key ... content....
1256 * key and content are both parsed by cache
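 *
 * For example (illustrative; the exact fields are defined by each cache's
 * ->cache_parse method), a downcall into the auth.unix.ip cache looks like
 *
 *	nfsd 192.0.2.1 1234567890 mydomain
 *
 * i.e. class, address, expiry time and content (here a domain name), each
 * field encoded with the quoting rules described above.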
1259 int qword_get(char **bpp, char *dest, int bufsize)
1261 /* return bytes copied, or -1 on error */
1265 while (*bp == ' ') bp++;
1267 if (bp[0] == '\\' && bp[1] == 'x') {
1270 while (len < bufsize - 1) {
1273 h = hex_to_bin(bp[0]);
1277 l = hex_to_bin(bp[1]);
1281 *dest++ = (h << 4) | l;
1286 /* text with \nnn octal quoting */
1287 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1289 isodigit(bp[1]) && (bp[1] <= '3') &&
1292 int byte = (*++bp -'0');
1294 byte = (byte << 3) | (*bp++ - '0');
1295 byte = (byte << 3) | (*bp++ - '0');
1305 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1307 while (*bp == ' ') bp++;
1312 EXPORT_SYMBOL_GPL(qword_get);
1316 * support /proc/net/rpc/$CACHENAME/content
1318 * We call ->cache_show passing NULL for the item to
1319 * get a header, then pass each real item in the cache
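 *
 * The result is one header line from ->cache_show(m, cd, NULL) followed by
 * one line per entry; c_show() below prefixes entries that are expired or
 * not currently usable with "# " so they read as comments.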
1322 static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1325 unsigned int hash, entry;
1326 struct cache_head *ch;
1327 struct cache_detail *cd = m->private;
1330 return SEQ_START_TOKEN;
1332 entry = n & ((1LL<<32) - 1);
1334 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1337 n &= ~((1LL<<32) - 1);
1341 } while(hash < cd->hash_size &&
1342 hlist_empty(&cd->hash_table[hash]));
1343 if (hash >= cd->hash_size)
1346 return hlist_entry_safe(rcu_dereference_raw(
1347 hlist_first_rcu(&cd->hash_table[hash])),
1348 struct cache_head, cache_list);
1351 static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1353 struct cache_head *ch = p;
1354 int hash = (*pos >> 32);
1355 struct cache_detail *cd = m->private;
1357 if (p == SEQ_START_TOKEN)
1359 else if (ch->cache_list.next == NULL) {
1364 return hlist_entry_safe(rcu_dereference_raw(
1365 hlist_next_rcu(&ch->cache_list)),
1366 struct cache_head, cache_list);
1368 *pos &= ~((1LL<<32) - 1);
1369 while (hash < cd->hash_size &&
1370 hlist_empty(&cd->hash_table[hash])) {
1374 if (hash >= cd->hash_size)
1377 return hlist_entry_safe(rcu_dereference_raw(
1378 hlist_first_rcu(&cd->hash_table[hash])),
1379 struct cache_head, cache_list);
1382 void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1386 return __cache_seq_start(m, pos);
1388 EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1390 void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1392 return cache_seq_next(file, p, pos);
1394 EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1396 void cache_seq_stop_rcu(struct seq_file *m, void *p)
1401 EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1403 static int c_show(struct seq_file *m, void *p)
1405 struct cache_head *cp = p;
1406 struct cache_detail *cd = m->private;
1408 if (p == SEQ_START_TOKEN)
1409 return cd->cache_show(m, cd, NULL);
1412 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1413 convert_to_wallclock(cp->expiry_time),
1414 kref_read(&cp->ref), cp->flags);
1416 if (cache_check(cd, cp, NULL))
1417 /* cache_check does a cache_put on failure */
1418 seq_printf(m, "# ");
1420 if (cache_is_expired(cd, cp))
1421 seq_printf(m, "# ");
1425 return cd->cache_show(m, cd, cp);
1428 static const struct seq_operations cache_content_op = {
1429 .start = cache_seq_start_rcu,
1430 .next = cache_seq_next_rcu,
1431 .stop = cache_seq_stop_rcu,
1435 static int content_open(struct inode *inode, struct file *file,
1436 struct cache_detail *cd)
1438 struct seq_file *seq;
1441 if (!cd || !try_module_get(cd->owner))
1444 err = seq_open(file, &cache_content_op);
1446 module_put(cd->owner);
1450 seq = file->private_data;
1455 static int content_release(struct inode *inode, struct file *file,
1456 struct cache_detail *cd)
1458 int ret = seq_release(inode, file);
1459 module_put(cd->owner);
1463 static int open_flush(struct inode *inode, struct file *file,
1464 struct cache_detail *cd)
1466 if (!cd || !try_module_get(cd->owner))
1468 return nonseekable_open(inode, file);
1471 static int release_flush(struct inode *inode, struct file *file,
1472 struct cache_detail *cd)
1474 module_put(cd->owner);
1478 static ssize_t read_flush(struct file *file, char __user *buf,
1479 size_t count, loff_t *ppos,
1480 struct cache_detail *cd)
1485 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1486 convert_to_wallclock(cd->flush_time));
1487 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1490 static ssize_t write_flush(struct file *file, const char __user *buf,
1491 size_t count, loff_t *ppos,
1492 struct cache_detail *cd)
1498 if (*ppos || count > sizeof(tbuf)-1)
1500 if (copy_from_user(tbuf, buf, count))
1503 simple_strtoul(tbuf, &ep, 0);
1504 if (*ep && *ep != '\n')
1506 /* Note that while we check that 'buf' holds a valid number,
1507 * we always ignore the value and just flush everything.
1508 * Making use of the number leads to races.
1511 now = seconds_since_boot();
1512 /* Always flush everything, so behave like cache_purge()
1513 * Do this by advancing flush_time to the current time,
1514 * or by one second if it has already reached the current time.
1515 * Newly added cache entries will always have ->last_refresh greater
1516 * than ->flush_time, so they don't get flushed prematurely.
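 *
 * So e.g. "echo 1 > /proc/net/rpc/<cachename>/flush" invalidates every
 * entry currently in the cache (the "1" could be any number).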
1519 if (cd->flush_time >= now)
1520 now = cd->flush_time + 1;
1522 cd->flush_time = now;
1523 cd->nextcheck = now;
1533 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1534 size_t count, loff_t *ppos)
1536 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1538 return cache_read(filp, buf, count, ppos, cd);
1541 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1542 size_t count, loff_t *ppos)
1544 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1546 return cache_write(filp, buf, count, ppos, cd);
1549 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1551 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1553 return cache_poll(filp, wait, cd);
1556 static long cache_ioctl_procfs(struct file *filp,
1557 unsigned int cmd, unsigned long arg)
1559 struct inode *inode = file_inode(filp);
1560 struct cache_detail *cd = PDE_DATA(inode);
1562 return cache_ioctl(inode, filp, cmd, arg, cd);
1565 static int cache_open_procfs(struct inode *inode, struct file *filp)
1567 struct cache_detail *cd = PDE_DATA(inode);
1569 return cache_open(inode, filp, cd);
1572 static int cache_release_procfs(struct inode *inode, struct file *filp)
1574 struct cache_detail *cd = PDE_DATA(inode);
1576 return cache_release(inode, filp, cd);
1579 static const struct proc_ops cache_channel_proc_ops = {
1580 .proc_lseek = no_llseek,
1581 .proc_read = cache_read_procfs,
1582 .proc_write = cache_write_procfs,
1583 .proc_poll = cache_poll_procfs,
1584 .proc_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1585 .proc_open = cache_open_procfs,
1586 .proc_release = cache_release_procfs,
1589 static int content_open_procfs(struct inode *inode, struct file *filp)
1591 struct cache_detail *cd = PDE_DATA(inode);
1593 return content_open(inode, filp, cd);
1596 static int content_release_procfs(struct inode *inode, struct file *filp)
1598 struct cache_detail *cd = PDE_DATA(inode);
1600 return content_release(inode, filp, cd);
1603 static const struct proc_ops content_proc_ops = {
1604 .proc_open = content_open_procfs,
1605 .proc_read = seq_read,
1606 .proc_lseek = seq_lseek,
1607 .proc_release = content_release_procfs,
1610 static int open_flush_procfs(struct inode *inode, struct file *filp)
1612 struct cache_detail *cd = PDE_DATA(inode);
1614 return open_flush(inode, filp, cd);
1617 static int release_flush_procfs(struct inode *inode, struct file *filp)
1619 struct cache_detail *cd = PDE_DATA(inode);
1621 return release_flush(inode, filp, cd);
1624 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1625 size_t count, loff_t *ppos)
1627 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1629 return read_flush(filp, buf, count, ppos, cd);
1632 static ssize_t write_flush_procfs(struct file *filp,
1633 const char __user *buf,
1634 size_t count, loff_t *ppos)
1636 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1638 return write_flush(filp, buf, count, ppos, cd);
1641 static const struct proc_ops cache_flush_proc_ops = {
1642 .proc_open = open_flush_procfs,
1643 .proc_read = read_flush_procfs,
1644 .proc_write = write_flush_procfs,
1645 .proc_release = release_flush_procfs,
1646 .proc_lseek = no_llseek,
1649 static void remove_cache_proc_entries(struct cache_detail *cd)
1652 proc_remove(cd->procfs);
1657 #ifdef CONFIG_PROC_FS
1658 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1660 struct proc_dir_entry *p;
1661 struct sunrpc_net *sn;
1663 sn = net_generic(net, sunrpc_net_id);
1664 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1665 if (cd->procfs == NULL)
1668 p = proc_create_data("flush", S_IFREG | 0600,
1669 cd->procfs, &cache_flush_proc_ops, cd);
1673 if (cd->cache_request || cd->cache_parse) {
1674 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1675 &cache_channel_proc_ops, cd);
1679 if (cd->cache_show) {
1680 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1681 &content_proc_ops, cd);
1687 remove_cache_proc_entries(cd);
1690 #else /* CONFIG_PROC_FS */
1691 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1697 void __init cache_initialize(void)
1699 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1702 int cache_register_net(struct cache_detail *cd, struct net *net)
1706 sunrpc_init_cache_detail(cd);
1707 ret = create_cache_proc_entries(cd, net);
1709 sunrpc_destroy_cache_detail(cd);
1712 EXPORT_SYMBOL_GPL(cache_register_net);
1714 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1716 remove_cache_proc_entries(cd);
1717 sunrpc_destroy_cache_detail(cd);
1719 EXPORT_SYMBOL_GPL(cache_unregister_net);
1721 struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1723 struct cache_detail *cd;
1726 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1728 return ERR_PTR(-ENOMEM);
1730 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1732 if (cd->hash_table == NULL) {
1734 return ERR_PTR(-ENOMEM);
1737 for (i = 0; i < cd->hash_size; i++)
1738 INIT_HLIST_HEAD(&cd->hash_table[i]);
1742 EXPORT_SYMBOL_GPL(cache_create_net);
1744 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1746 kfree(cd->hash_table);
1749 EXPORT_SYMBOL_GPL(cache_destroy_net);
1751 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1752 size_t count, loff_t *ppos)
1754 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1756 return cache_read(filp, buf, count, ppos, cd);
1759 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1760 size_t count, loff_t *ppos)
1762 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1764 return cache_write(filp, buf, count, ppos, cd);
1767 static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1769 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1771 return cache_poll(filp, wait, cd);
1774 static long cache_ioctl_pipefs(struct file *filp,
1775 unsigned int cmd, unsigned long arg)
1777 struct inode *inode = file_inode(filp);
1778 struct cache_detail *cd = RPC_I(inode)->private;
1780 return cache_ioctl(inode, filp, cmd, arg, cd);
1783 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1785 struct cache_detail *cd = RPC_I(inode)->private;
1787 return cache_open(inode, filp, cd);
1790 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1792 struct cache_detail *cd = RPC_I(inode)->private;
1794 return cache_release(inode, filp, cd);
1797 const struct file_operations cache_file_operations_pipefs = {
1798 .owner = THIS_MODULE,
1799 .llseek = no_llseek,
1800 .read = cache_read_pipefs,
1801 .write = cache_write_pipefs,
1802 .poll = cache_poll_pipefs,
1803 .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1804 .open = cache_open_pipefs,
1805 .release = cache_release_pipefs,
1808 static int content_open_pipefs(struct inode *inode, struct file *filp)
1810 struct cache_detail *cd = RPC_I(inode)->private;
1812 return content_open(inode, filp, cd);
1815 static int content_release_pipefs(struct inode *inode, struct file *filp)
1817 struct cache_detail *cd = RPC_I(inode)->private;
1819 return content_release(inode, filp, cd);
1822 const struct file_operations content_file_operations_pipefs = {
1823 .open = content_open_pipefs,
1825 .llseek = seq_lseek,
1826 .release = content_release_pipefs,
1829 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1831 struct cache_detail *cd = RPC_I(inode)->private;
1833 return open_flush(inode, filp, cd);
1836 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1838 struct cache_detail *cd = RPC_I(inode)->private;
1840 return release_flush(inode, filp, cd);
1843 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1844 size_t count, loff_t *ppos)
1846 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1848 return read_flush(filp, buf, count, ppos, cd);
1851 static ssize_t write_flush_pipefs(struct file *filp,
1852 const char __user *buf,
1853 size_t count, loff_t *ppos)
1855 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1857 return write_flush(filp, buf, count, ppos, cd);
1860 const struct file_operations cache_flush_operations_pipefs = {
1861 .open = open_flush_pipefs,
1862 .read = read_flush_pipefs,
1863 .write = write_flush_pipefs,
1864 .release = release_flush_pipefs,
1865 .llseek = no_llseek,
1868 int sunrpc_cache_register_pipefs(struct dentry *parent,
1869 const char *name, umode_t umode,
1870 struct cache_detail *cd)
1872 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1874 return PTR_ERR(dir);
1878 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1880 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1883 rpc_remove_cache_dir(cd->pipefs);
1887 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1889 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1891 spin_lock(&cd->hash_lock);
1892 if (!hlist_unhashed(&h->cache_list)){
1893 sunrpc_begin_cache_remove_entry(h, cd);
1894 spin_unlock(&cd->hash_lock);
1895 sunrpc_end_cache_remove_entry(h, cd);
1897 spin_unlock(&cd->hash_lock);
1899 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);