svc: Clean up deferred requests on transport destruction
author     Tom Tucker <tom@opengridcomputing.com>
           Mon, 5 Jan 2009 21:21:19 +0000 (15:21 -0600)
committer  Greg Kroah-Hartman <gregkh@suse.de>
           Wed, 26 May 2010 21:27:08 +0000 (14:27 -0700)
commit 22945e4a1c7454c97f5d8aee1ef526c83fef3223 upstream.

A race between svc_revisit and svc_delete_xprt can leave deferred
requests holding references on a transport that will never be released,
because dead transports are not enqueued for subsequent processing.

Check for XPT_DEAD in svc_revisit to clean up deferrals that complete
on a dead transport, and sweep the transport's deferred queue in
svc_delete_xprt to do the same for deferrals that were queued but never
processed.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: roma1390 <roma1390@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
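
[Context, not part of the patch] The transport reference that each
deferral pins is taken in svc_defer(); svc_revisit() is the callback
expected to drop it again. Abbreviated, and approximately as it reads
in kernels of this vintage:

    static struct cache_deferred_req *svc_defer(struct cache_req *req)
    {
            struct svc_rqst *rqstp = container_of(req, struct svc_rqst,
                                                  rq_chandle);
            struct svc_deferred_req *dr;

            /* ... allocate dr and copy the request arguments into it ... */

            /* Pin the transport for the deferred request; svc_revisit()
             * (or the sweep added by this patch) must drop this reference. */
            svc_xprt_get(rqstp->rq_xprt);
            dr->xprt = rqstp->rq_xprt;

            dr->handle.revisit = svc_revisit;
            return &dr->handle;
    }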
net/sunrpc/svc_xprt.c

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index e46c825f49548923f79f1363d4a7712ef4ead513..4c772de51bb4a4b464935c5518d953479faf7146 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -810,6 +810,11 @@ static void svc_age_temp_xprts(unsigned long closure)
 void svc_delete_xprt(struct svc_xprt *xprt)
 {
        struct svc_serv *serv = xprt->xpt_server;
+       struct svc_deferred_req *dr;
+
+       /* Only do this once */
+       if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
+               return;
 
        dprintk("svc: svc_delete_xprt(%p)\n", xprt);
        xprt->xpt_ops->xpo_detach(xprt);
@@ -824,12 +829,16 @@ void svc_delete_xprt(struct svc_xprt *xprt)
         * while still attached to a queue, the queue itself
         * is about to be destroyed (in svc_destroy).
         */
-       if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
-               BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
-               if (test_bit(XPT_TEMP, &xprt->xpt_flags))
-                       serv->sv_tmpcnt--;
+       if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+               serv->sv_tmpcnt--;
+
+       for (dr = svc_deferred_dequeue(xprt); dr;
+            dr = svc_deferred_dequeue(xprt)) {
                svc_xprt_put(xprt);
+               kfree(dr);
        }
+
+       svc_xprt_put(xprt);
        spin_unlock_bh(&serv->sv_lock);
 }
 
@@ -875,17 +884,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
                container_of(dreq, struct svc_deferred_req, handle);
        struct svc_xprt *xprt = dr->xprt;
 
-       if (too_many) {
+       spin_lock(&xprt->xpt_lock);
+       set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+       if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+               spin_unlock(&xprt->xpt_lock);
+               dprintk("revisit canceled\n");
                svc_xprt_put(xprt);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        dr->xprt = NULL;
-       spin_lock(&xprt->xpt_lock);
        list_add(&dr->handle.recent, &xprt->xpt_deferred);
        spin_unlock(&xprt->xpt_lock);
-       set_bit(XPT_DEFERRED, &xprt->xpt_flags);
        svc_xprt_enqueue(xprt);
        svc_xprt_put(xprt);
 }
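
[Context, not part of the patch] The sweep added to svc_delete_xprt()
relies on svc_deferred_dequeue(), which this patch does not change; it
looks approximately like this in this kernel:

    static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
    {
            struct svc_deferred_req *dr = NULL;

            if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
                    return NULL;
            spin_lock(&xprt->xpt_lock);
            clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
            if (!list_empty(&xprt->xpt_deferred)) {
                    dr = list_entry(xprt->xpt_deferred.next,
                                    struct svc_deferred_req,
                                    handle.recent);
                    list_del_init(&dr->handle.recent);
                    /* entries may remain, so leave XPT_DEFERRED set */
                    set_bit(XPT_DEFERRED, &xprt->xpt_flags);
            }
            spin_unlock(&xprt->xpt_lock);
            return dr;
    }

Each entry returned by the sweep still holds the reference taken in
svc_defer(), which is why the loop in svc_delete_xprt() pairs each
kfree(dr) with an svc_xprt_put(xprt).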