]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - net/ipv4/tcp_output.c
tcp: change tcp_skb_pcount() location
[karo-tx-linux.git] / net / ipv4 / tcp_output.c
index 8c61a7c0c88961ff07db6b12949b5befb09f7318..4d92703df4c62457a914dc2603c7a6018fb75876 100644 (file)
@@ -384,7 +384,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
        TCP_SKB_CB(skb)->tcp_flags = flags;
        TCP_SKB_CB(skb)->sacked = 0;
 
-       shinfo->gso_segs = 1;
+       tcp_skb_pcount_set(skb, 1);
        shinfo->gso_size = 0;
        shinfo->gso_type = 0;
 
@@ -972,8 +972,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
                              tcp_skb_pcount(skb));
 
+       /* OK, it's time to fill skb_shinfo(skb)->gso_segs */
+       skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+
        /* Our usage of tstamp should remain private */
        skb->tstamp.tv64 = 0;
+
+       /* Cleanup our debris for IP stacks */
+       memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
+                              sizeof(struct inet6_skb_parm)));
+
        err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
 
        if (likely(err <= 0))
@@ -995,7 +1003,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 
        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
-       skb_header_release(skb);
+       __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
@@ -1014,11 +1022,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
-               shinfo->gso_segs = 1;
+               tcp_skb_pcount_set(skb, 1);
                shinfo->gso_size = 0;
                shinfo->gso_type = 0;
        } else {
-               shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+               tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
                shinfo->gso_size = mss_now;
                shinfo->gso_type = sk->sk_gso_type;
        }
@@ -1167,7 +1175,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        }
 
        /* Link BUFF into the send queue. */
-       skb_header_release(buff);
+       __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);
 
        return 0;
@@ -1671,7 +1679,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
        tcp_set_skb_tso_segs(sk, buff, mss_now);
 
        /* Link BUFF into the send queue. */
-       skb_header_release(buff);
+       __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);
 
        return 0;
@@ -2772,7 +2780,7 @@ int tcp_send_synack(struct sock *sk)
                        if (nskb == NULL)
                                return -ENOMEM;
                        tcp_unlink_write_queue(skb, sk);
-                       skb_header_release(nskb);
+                       __skb_header_release(nskb);
                        __tcp_add_write_queue_head(sk, nskb);
                        sk_wmem_free_skb(sk, skb);
                        sk->sk_wmem_queued += nskb->truesize;
@@ -2947,7 +2955,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
        tcb->end_seq += skb->len;
-       skb_header_release(skb);
+       __skb_header_release(skb);
        __tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);