RDS: Use a generation counter to avoid rds_send_xmit loop
author	Chris Mason <chris.mason@oracle.com>	Thu, 15 Apr 2010 20:38:14 +0000 (16:38 -0400)
committer	Andy Grover <andy.grover@oracle.com>	Thu, 9 Sep 2010 01:12:24 +0000 (18:12 -0700)
rds_send_xmit is required to loop around after it releases the lock
because someone else could have done a trylock, found someone working on
the list and backed off.

But, once we drop our lock, it is possible that someone else does come
in and make progress on the list.  We should detect this and not loop
around if another process is actually working on the list.

This patch adds a generation counter that is bumped every time we
get the lock and do some send work.  If the retry path notices that
someone else has bumped the generation counter, it does not need to
loop around and continue working.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
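
For illustration only, here is a minimal userspace sketch of the idea behind
the patch, not the kernel code itself: a sender that loses the trylock backs
off, and a sender that finishes its pass only loops back if nobody else has
bumped the generation counter in the meantime.  The struct conn layout and the
queue_drain()/queue_nonempty() helpers are illustrative stand-ins, and a C11
atomic plus a pthread mutex stand in for the kernel's atomic_t and c_send_lock.

/*
 * Minimal sketch of the generation-counter retry, assuming userspace
 * C11 atomics and pthreads rather than kernel primitives.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t send_lock;	/* only one sender at a time */
	atomic_int	send_generation;
	/* ... the send queue would live here ... */
};

/* Illustrative stand-ins for the real queue operations. */
static void queue_drain(struct conn *c)    { (void)c; }
static bool queue_nonempty(struct conn *c) { (void)c; return false; }

static int send_xmit(struct conn *c)
{
	int gen;

restart:
	/* Lost the trylock: another sender is already working the queue. */
	if (pthread_mutex_trylock(&c->send_lock) != 0)
		return -1;

	/* Record that this sender is making a pass over the queue. */
	gen = atomic_fetch_add(&c->send_generation, 1) + 1;

	queue_drain(c);

	pthread_mutex_unlock(&c->send_lock);

	/*
	 * Work may have been queued after we stopped draining.  Only loop
	 * back if no other sender has bumped the generation since our
	 * pass; if it moved, that sender owns the retry.
	 */
	if (queue_nonempty(c) && gen == atomic_load(&c->send_generation))
		goto restart;

	return 0;
}

This mirrors the hunk in net/rds/send.c below, where the spin_lock_irqsave on
c_lock in the raced path is replaced by smp_mb() plus a comparison against
c_send_generation, so a raced sender only restarts when nobody else has
already taken over the queue.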
net/rds/connection.c
net/rds/rds.h
net/rds/send.c

index 56aebe444ad35b165706ca84f2af8f90e8a212a0..7e4e9dfdbc0b66863b191b54d375b477e822b4fa 100644 (file)
@@ -147,6 +147,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
        conn->c_next_tx_seq = 1;
 
        spin_lock_init(&conn->c_send_lock);
+       atomic_set(&conn->c_send_generation, 1);
        INIT_LIST_HEAD(&conn->c_send_queue);
        INIT_LIST_HEAD(&conn->c_retrans);
 
index 2f19d49fac9c6bf86a76955fac52ecbef644fe87..b57cb50c1f225975318f7bac6b810a9900508a04 100644 (file)
@@ -92,6 +92,7 @@ struct rds_connection {
        struct rds_cong_map     *c_fcong;
 
        spinlock_t              c_send_lock;    /* protect send ring */
+       atomic_t                c_send_generation;
        struct rds_message      *c_xmit_rm;
        unsigned long           c_xmit_sg;
        unsigned int            c_xmit_hdr_off;
index de5693cdcefb10fdd2cef374d2f5983ac392cccc..663fd60b40cf520e29c4f99e22521330d8f8103f 100644 (file)
@@ -112,6 +112,7 @@ int rds_send_xmit(struct rds_connection *conn)
        unsigned int tmp;
        struct scatterlist *sg;
        int ret = 0;
+       int gen = 0;
        LIST_HEAD(to_be_dropped);
 
 restart:
@@ -134,6 +135,8 @@ restart:
        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);
 
+       gen = atomic_inc_return(&conn->c_send_generation);
+
        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
@@ -359,13 +362,13 @@ restart:
        if (ret == 0) {
                /* A simple bit test would be way faster than taking the
                 * spin lock */
-               spin_lock_irqsave(&conn->c_lock, flags);
+               smp_mb();
                if (!list_empty(&conn->c_send_queue)) {
                        rds_stats_inc(s_send_lock_queue_raced);
-                       spin_unlock_irqrestore(&conn->c_lock, flags);
-                       goto restart;
+                       if (gen == atomic_read(&conn->c_send_generation)) {
+                               goto restart;
+                       }
                }
-               spin_unlock_irqrestore(&conn->c_lock, flags);
        }
 out:
        return ret;