asedeno.scripts.mit.edu Git - linux.git/commitdiff
nfsd: eliminate one of the DRC cache searches
author: Jeff Layton <jlayton@redhat.com>
Wed, 27 Mar 2013 14:15:37 +0000 (10:15 -0400)
committer: J. Bruce Fields <bfields@redhat.com>
Wed, 3 Apr 2013 15:47:22 +0000 (11:47 -0400)
The most common case is to do a search of the cache, followed by an
insert. In the case where we have to allocate an entry off the slab,
then we end up having to redo the search, which is wasteful.

Better optimize the code for the common case by eliminating the initial
search of the cache and always preallocating an entry. In the case of a
cache hit, we'll end up just freeing that entry but that's preferable to
an extra search.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
fs/nfsd/nfscache.c

index ca05f6dc3544b4c216eb369d9e01fe7a48f0e90a..c61391e8e09dd2d5e7108c9e3dab134c779ffa83 100644 (file)
@@ -318,55 +318,53 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
        __wsum                  csum;
        unsigned long           age;
        int type = rqstp->rq_cachetype;
-       int rtn;
+       int rtn = RC_DOIT;
 
        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
-               return RC_DOIT;
+               return rtn;
        }
 
        csum = nfsd_cache_csum(rqstp);
 
+       /*
+        * Since the common case is a cache miss followed by an insert,
+        * preallocate an entry. First, try to reuse the first entry on the LRU
+        * if it works, then go ahead and prune the LRU list.
+        */
        spin_lock(&cache_lock);
-       rtn = RC_DOIT;
-
-       rp = nfsd_cache_search(rqstp, csum);
-       if (rp)
-               goto found_entry;
-
-       /* Try to use the first entry on the LRU */
        if (!list_empty(&lru_head)) {
                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
                if (nfsd_cache_entry_expired(rp) ||
                    num_drc_entries >= max_drc_entries) {
                        lru_put_end(rp);
                        prune_cache_entries();
-                       goto setup_entry;
+                       goto search_cache;
                }
        }
 
-       /* Drop the lock and allocate a new entry */
+       /* No expired ones available, allocate a new one. */
        spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
-       if (!rp) {
-               dprintk("nfsd: unable to allocate DRC entry!\n");
-               return RC_DOIT;
-       }
        spin_lock(&cache_lock);
-       ++num_drc_entries;
+       if (likely(rp))
+               ++num_drc_entries;
 
-       /*
-        * Must search again just in case someone inserted one
-        * after we dropped the lock above.
-        */
+search_cache:
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
-               nfsd_reply_cache_free_locked(rp);
+               if (likely(rp))
+                       nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }
 
+       if (!rp) {
+               dprintk("nfsd: unable to allocate DRC entry!\n");
+               goto out;
+       }
+
        /*
         * We're keeping the one we just allocated. Are we now over the
         * limit? Prune one off the tip of the LRU in trade for the one we
@@ -376,7 +374,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
                                                struct svc_cacherep, c_lru));
 
-setup_entry:
        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;