Revert "Apply SCST exec req fifo patch"
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 104f8444754aa160fe7922458980ddb57485a2a5..c2b64fbee58ec1e154c57bc677f0e493a1417616 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -76,13 +76,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
 {
-       put_page(buf->page);
+       net_put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       get_page(buf->page);
+       net_get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -324,7 +324,7 @@ static void skb_release_data(struct sk_buff *skb)
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-                               put_page(skb_shinfo(skb)->frags[i].page);
+                               net_put_page(skb_shinfo(skb)->frags[i].page);
                }
 
                if (skb_has_frag_list(skb))
@@ -730,7 +730,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-                       get_page(skb_shinfo(n)->frags[i].page);
+                       net_get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }
@@ -806,7 +806,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                kfree(skb->head);
        } else {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-                       get_page(skb_shinfo(skb)->frags[i].page);
+                       net_get_page(skb_shinfo(skb)->frags[i].page);
 
                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);
@@ -1083,7 +1083,7 @@ drop_pages:
                skb_shinfo(skb)->nr_frags = i;
 
                for (; i < nfrags; i++)
-                       put_page(skb_shinfo(skb)->frags[i].page);
+                       net_put_page(skb_shinfo(skb)->frags[i].page);
 
                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);
@@ -1252,7 +1252,7 @@ pull_pages:
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
-                       put_page(skb_shinfo(skb)->frags[i].page);
+                       net_put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -1353,7 +1353,7 @@ EXPORT_SYMBOL(skb_copy_bits);
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-       put_page(spd->pages[i]);
+       net_put_page(spd->pages[i]);
 }
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
@@ -1377,7 +1377,7 @@ new_page:
                off = sk->sk_sndmsg_off;
                mlen = PAGE_SIZE - off;
                if (mlen < 64 && mlen < *len) {
-                       put_page(p);
+                       net_put_page(p);
                        goto new_page;
                }
 
@@ -1387,7 +1387,7 @@ new_page:
        memcpy(page_address(p) + off, page_address(page) + *offset, *len);
        sk->sk_sndmsg_off += *len;
        *offset = off;
-       get_page(p);
+       net_get_page(p);
 
        return p;
 }
@@ -1409,7 +1409,7 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd,
                if (!page)
                        return 1;
        } else
-               get_page(page);
+               net_get_page(page);
 
        spd->pages[spd->nr_pages] = page;
        spd->partial[spd->nr_pages].len = *len;
@@ -2042,7 +2042,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
                                 *    where splitting is expensive.
                                 * 2. Split is accurately. We make this.
                                 */
-                               get_page(skb_shinfo(skb)->frags[i].page);
+                               net_get_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
                                skb_shinfo(skb1)->frags[0].size -= len - pos;
                                skb_shinfo(skb)->frags[i].size  = len - pos;
@@ -2164,7 +2164,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
                        to++;
 
                } else {
-                       get_page(fragfrom->page);
+                       net_get_page(fragfrom->page);
                        fragto->page = fragfrom->page;
                        fragto->page_offset = fragfrom->page_offset;
                        fragto->size = todo;
@@ -2186,7 +2186,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
                fragto = &skb_shinfo(tgt)->frags[merge];
 
                fragto->size += fragfrom->size;
-               put_page(fragfrom->page);
+               net_put_page(fragfrom->page);
        }
 
        /* Reposition in the original skb */
@@ -2587,7 +2587,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 
                while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
-                       get_page(frag->page);
+                       net_get_page(frag->page);
                        size = frag->size;
 
                        if (pos < offset) {