ipv4: coding style: comparison for inequality with NULL
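
The change is purely one of spelling: every hunk below rewrites an
explicit comparison with NULL into the equivalent boolean test (!ptr
for equality, plain ptr for inequality), the form checkpatch.pl
suggests. A minimal illustrative sketch, using a hypothetical list
walker that is not taken from tcp_input.c:

  #include <stddef.h>

  struct item {
          struct item *next;
  };

  /* Hypothetical helper, shown only to illustrate the convention:
   * each NULL test is written in the preferred form, with the
   * replaced spelling noted in the trailing comment.
   */
  static struct item *last_item(struct item *head)
  {
          if (!head)              /* was: if (head == NULL) */
                  return NULL;

          while (head->next)      /* was: while (head->next != NULL) */
                  head = head->next;

          return head;
  }

Both spellings compile to the same test, so no behaviour changes; only
readability and consistency with the rest of the IPv4 code improve.
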
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18b80e8bc5336564560b7897a939bbbb2d83e5ed..df7e7fa1273333a963eddf799eebd2b5bf71e9ad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if ((tp->retransmit_skb_hint == NULL) ||
+       if (!tp->retransmit_skb_hint ||
            before(TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                tp->retransmit_skb_hint = skb;
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                fack_count += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+               if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
                    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                        break;
 
-               if ((next_dup != NULL) &&
+               if (next_dup  &&
                    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
                        in_sack = tcp_match_skb_to_sack(sk, skb,
                                                        next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (in_sack <= 0) {
                        tmp = tcp_shift_skb_data(sk, skb, state,
                                                 start_seq, end_seq, dup_sack);
-                       if (tmp != NULL) {
+                       if (tmp) {
                                if (tmp != skb) {
                                        skb = tmp;
                                        continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                                                struct tcp_sacktag_state *state,
                                                u32 skip_to_seq)
 {
-       if (next_dup == NULL)
+       if (!next_dup)
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        if (tcp_highest_sack_seq(tp) == cache->end_seq) {
                                /* ...but better entrypoint exists! */
                                skb = tcp_highest_sack(sk);
-                               if (skb == NULL)
+                               if (!skb)
                                        break;
                                state.fack_count = tp->fackets_out;
                                cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                if (!before(start_seq, tcp_highest_sack_seq(tp))) {
                        skb = tcp_highest_sack(sk);
-                       if (skb == NULL)
+                       if (!skb)
                                break;
                        state.fack_count = tp->fackets_out;
                }
@@ -3698,7 +3698,7 @@ void tcp_parse_options(const struct sk_buff *skb,
                                 */
                                if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
                                    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-                                   foc == NULL || !th->syn || (opsize & 1))
+                                   !foc || !th->syn || (opsize & 1))
                                        break;
                                foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
                                if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
@@ -4669,7 +4669,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        struct sk_buff *head;
        u32 start, end;
 
-       if (skb == NULL)
+       if (!skb)
                return;
 
        start = TCP_SKB_CB(skb)->seq;
@@ -5124,7 +5124,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(sk->sk_rx_dst == NULL))
+       if (unlikely(!sk->sk_rx_dst))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
        /*
         *      Header prediction.
@@ -5321,7 +5321,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL) {
+       if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
        }
@@ -5690,11 +5690,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        req = tp->fastopen_rsk;
-       if (req != NULL) {
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                    sk->sk_state != TCP_FIN_WAIT1);
 
-               if (tcp_check_req(sk, skb, req, true) == NULL)
+               if (!tcp_check_req(sk, skb, req, true))
                        goto discard;
        }
 
@@ -5780,7 +5780,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 * ACK we have received, this would have acknowledged
                 * our SYNACK so stop the SYNACK timer.
                 */
-               if (req != NULL) {
+               if (req) {
                        /* Return RST if ack_seq is invalid.
                         * Note that RFC793 only says to generate a
                         * DUPACK for it but for TCP Fast Open it seems