git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Revert "Apply SCST exec req fifo patch"
author Nils Faerber <nils.faerber@kernelconcepts.de>
Tue, 11 Jan 2011 16:22:12 +0000 (17:22 +0100)
committer Nils Faerber <nils.faerber@kernelconcepts.de>
Tue, 11 Jan 2011 16:22:12 +0000 (17:22 +0100)
This reverts commit 949dd53a6c529a72511b2340917546a12c58de05.

14 files changed:
block/blk-map.c
include/linux/blkdev.h
include/linux/mm_types.h
include/linux/net.h
include/linux/scatterlist.h
lib/scatterlist.c
net/Kconfig
net/core/dev.c
net/core/skbuff.c
net/ipv4/Makefile
net/ipv4/ip_output.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/ip6_output.c

index e663ac2d8e68f70ff17ce274f3cebec16c1dd18c..77bf1b7a5674df598d664d7d3f2ea421ddfa0af4 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
 
 #include "blk.h"
@@ -274,6 +276,337 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
+struct blk_kern_sg_work {
+       atomic_t bios_inflight;
+       struct sg_table sg_table;
+       struct scatterlist *src_sgl;
+};
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
+       sg_free_table(&bw->sg_table);
+       kfree(bw);
+       return;
+}
+
+static void blk_bio_map_kern_endio(struct bio *bio, int err)
+{
+       struct blk_kern_sg_work *bw = bio->bi_private;
+
+       if (bw != NULL) {
+               /* Decrement the in-flight bio count; free bw when it hits zero */
+               BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
+               if (atomic_dec_and_test(&bw->bios_inflight)) {
+                       if ((bio_data_dir(bio) == READ) && (err == 0)) {
+                               unsigned long flags;
+
+                               local_irq_save(flags);  /* to protect KMs */
+                               sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
+                                       KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
+                               local_irq_restore(flags);
+                       }
+                       blk_free_kern_sg_work(bw);
+               }
+       }
+
+       bio_put(bio);
+       return;
+}
+
+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
+                              int nents, struct blk_kern_sg_work **pbw,
+                              gfp_t gfp, gfp_t page_gfp)
+{
+       int res = 0, i;
+       struct scatterlist *sg;
+       struct scatterlist *new_sgl;
+       int new_sgl_nents;
+       size_t len = 0, to_copy;
+       struct blk_kern_sg_work *bw;
+
+       bw = kzalloc(sizeof(*bw), gfp);
+       if (bw == NULL)
+               goto out;
+
+       bw->src_sgl = sgl;
+
+       for_each_sg(sgl, sg, nents, i)
+               len += sg->length;
+       to_copy = len;
+
+       new_sgl_nents = PFN_UP(len);
+
+       res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+       if (res != 0)
+               goto out_free_bw;
+
+       new_sgl = bw->sg_table.sgl;
+
+       for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+               struct page *pg;
+
+               pg = alloc_page(page_gfp);
+               if (pg == NULL)
+                       goto err_free_new_sgl;
+
+               sg_assign_page(sg, pg);
+               sg->length = min_t(size_t, PAGE_SIZE, len);
+
+               len -= PAGE_SIZE;
+       }
+
+       if (rq_data_dir(rq) == WRITE) {
+               /*
+                * We need to limit the amount of copied data to to_copy,
+                * because the last element of sgl might not be marked as the
+                * last one in the SG chain.
+                */
+               sg_copy(new_sgl, sgl, 0, to_copy,
+                       KM_USER0, KM_USER1);
+       }
+
+       *pbw = bw;
+       /*
+        * REQ_COPY_USER name is misleading. It should be something like
+        * REQ_HAS_TAIL_SPACE_FOR_PADDING.
+        */
+       rq->cmd_flags |= REQ_COPY_USER;
+
+out:
+       return res;
+
+err_free_new_sgl:
+       for_each_sg(new_sgl, sg, new_sgl_nents, i) {
+               struct page *pg = sg_page(sg);
+               if (pg == NULL)
+                       break;
+               __free_page(pg);
+       }
+       sg_free_table(&bw->sg_table);
+
+out_free_bw:
+       kfree(bw);
+       res = -ENOMEM;
+       goto out;
+}
+
+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+       int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
+{
+       int res;
+       struct request_queue *q = rq->q;
+       int rw = rq_data_dir(rq);
+       int max_nr_vecs, i;
+       size_t tot_len;
+       bool need_new_bio;
+       struct scatterlist *sg, *prev_sg = NULL;
+       struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+       int bios;
+
+       if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
+               WARN_ON(1);
+               res = -EINVAL;
+               goto out;
+       }
+
+       /*
+        * Let's keep each bio allocation inside a single page to decrease
+        * probability of failure.
+        */
+       max_nr_vecs =  min_t(size_t,
+               ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
+               BIO_MAX_PAGES);
+
+       need_new_bio = true;
+       tot_len = 0;
+       bios = 0;
+       for_each_sg(sgl, sg, nents, i) {
+               struct page *page = sg_page(sg);
+               void *page_addr = page_address(page);
+               size_t len = sg->length, l;
+               size_t offset = sg->offset;
+
+               tot_len += len;
+               prev_sg = sg;
+
+               /*
+                * Each segment must be aligned on the DMA boundary and must
+                * not be located on the stack. The last one may have an
+                * unaligned length as long as the total length is aligned to
+                * the DMA padding alignment.
+                */
+               if (i == nents - 1)
+                       l = 0;
+               else
+                       l = len;
+               if (((sg->offset | l) & queue_dma_alignment(q)) ||
+                   (page_addr && object_is_on_stack(page_addr + sg->offset))) {
+                       res = -EINVAL;
+                       goto out_free_bios;
+               }
+
+               while (len > 0) {
+                       size_t bytes;
+                       int rc;
+
+                       if (need_new_bio) {
+                               bio = bio_kmalloc(gfp, max_nr_vecs);
+                               if (bio == NULL) {
+                                       res = -ENOMEM;
+                                       goto out_free_bios;
+                               }
+
+                               if (rw == WRITE)
+                                       bio->bi_rw |= REQ_WRITE;
+
+                               bios++;
+                               bio->bi_private = bw;
+                               bio->bi_end_io = blk_bio_map_kern_endio;
+
+                               if (hbio == NULL)
+                                       hbio = tbio = bio;
+                               else
+                                       tbio = tbio->bi_next = bio;
+                       }
+
+                       bytes = min_t(size_t, len, PAGE_SIZE - offset);
+
+                       rc = bio_add_pc_page(q, bio, page, bytes, offset);
+                       if (rc < bytes) {
+                               if (unlikely(need_new_bio || (rc < 0))) {
+                                       if (rc < 0)
+                                               res = rc;
+                                       else
+                                               res = -EIO;
+                                       goto out_free_bios;
+                               } else {
+                                       need_new_bio = true;
+                                       len -= rc;
+                                       offset += rc;
+                                       continue;
+                               }
+                       }
+
+                       need_new_bio = false;
+                       offset = 0;
+                       len -= bytes;
+                       page = nth_page(page, 1);
+               }
+       }
+
+       if (hbio == NULL) {
+               res = -EINVAL;
+               goto out_free_bios;
+       }
+
+       /* Total length must be aligned on DMA padding alignment */
+       if ((tot_len & q->dma_pad_mask) &&
+           !(rq->cmd_flags & REQ_COPY_USER)) {
+               res = -EINVAL;
+               goto out_free_bios;
+       }
+
+       if (bw != NULL)
+               atomic_set(&bw->bios_inflight, bios);
+
+       while (hbio != NULL) {
+               bio = hbio;
+               hbio = hbio->bi_next;
+               bio->bi_next = NULL;
+
+               blk_queue_bounce(q, &bio);
+
+               res = blk_rq_append_bio(q, rq, bio);
+               if (unlikely(res != 0)) {
+                       bio->bi_next = hbio;
+                       hbio = bio;
+                       /* We can have one or more bios bounced */
+                       goto out_unmap_bios;
+               }
+       }
+
+       res = 0;
+
+       rq->buffer = NULL;
+out:
+       return res;
+
+out_unmap_bios:
+       blk_rq_unmap_kern_sg(rq, res);
+
+out_free_bios:
+       while (hbio != NULL) {
+               bio = hbio;
+               hbio = hbio->bi_next;
+               bio_put(bio);
+       }
+       goto out;
+}
+
+/**
+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
+ * @rq:                request to fill
+ * @sgl:       area to map
+ * @nents:     number of elements in @sgl
+ * @gfp:       memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer will be used.
+ */
+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+                      int nents, gfp_t gfp)
+{
+       int res;
+
+       res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
+       if (unlikely(res != 0)) {
+               struct blk_kern_sg_work *bw = NULL;
+
+               res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
+                               gfp, rq->q->bounce_gfp | gfp);
+               if (unlikely(res != 0))
+                       goto out;
+
+               res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
+                               bw->sg_table.nents, bw, gfp);
+               if (res != 0) {
+                       blk_free_kern_sg_work(bw);
+                       goto out;
+               }
+       }
+
+       rq->buffer = NULL;
+
+out:
+       return res;
+}
+EXPORT_SYMBOL(blk_rq_map_kern_sg);
+
+/**
+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
+ * @rq:                request to unmap
+ * @err:       non-zero error code
+ *
+ * Description:
+ *    Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
+ *    only in case of an error!
+ */
+void blk_rq_unmap_kern_sg(struct request *rq, int err)
+{
+       struct bio *bio = rq->bio;
+
+       while (bio) {
+               struct bio *b = bio;
+               bio = bio->bi_next;
+               b->bi_end_io(b, err);
+       }
+       rq->bio = NULL;
+
+       return;
+}
+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:         request queue where request should be inserted
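
An illustrative caller-side sketch of the interface above: blk_rq_map_kern_sg(), with blk_rq_unmap_kern_sg() on the error path only. The helper example_fill_cdb() and the surrounding driver structure are assumptions for illustration, not taken from this patch, and cleanup is trimmed.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical setup step; a real driver would fill rq->cmd / rq->cmd_len. */
static int example_fill_cdb(struct request *rq)
{
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        return 0;
}

static int example_map_and_execute(struct request *rq,
                                   struct scatterlist *sgl, int nents)
{
        int res;

        res = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
        if (res != 0)
                return res;             /* nothing was mapped */

        res = example_fill_cdb(rq);
        if (res != 0) {
                /* Mapped but never executed: tear the mapping down. */
                blk_rq_unmap_kern_sg(rq, res);
                return res;
        }

        /*
         * On completion the block layer ends each bio, which runs
         * blk_bio_map_kern_endio() and releases any bounce pages, so the
         * success path needs no explicit unmap.
         */
        return blk_execute_rq(rq->q, NULL, rq, 0);
}
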
index 36ab42c9bb991566dc5004fad9ca3c0c958bfeb0..b22758f78259579eac7e3c52a3d0ae7cdbe5cb83 100644 (file)
@@ -592,6 +592,8 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
 #define BLK_MIN_SG_TIMEOUT     (7 * HZ)
 
+#define SCSI_EXEC_REQ_FIFO_DEFINED
+
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
@@ -709,6 +711,9 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
                               struct rq_map_data *, struct sg_iovec *, int,
                               unsigned int, gfp_t);
+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
+                             int nents, gfp_t gfp);
+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
index bb7288a782fde2ff1d19978a7b9cb387476575ba..9fa6a60fdab22e422cd5d54caf2c2ec26ef1ad61 100644 (file)
@@ -100,6 +100,18 @@ struct page {
         */
        void *shadow;
 #endif
+
+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
+       /*
+        * Used to implement support for notification on zero-copy TCP
+        * transfer completion. It might seem better to put this field in
+        * struct sk_buff instead, but that would make the code much more
+        * complicated and fragile, since every skb would then be allowed to
+        * contain only pages with the same value in this field.
+        */
+        void *net_priv;
+#endif
+
 };
 
 /*
index 16faa130088c298fe8540b7fa29d365d0ea579b7..4c7c564bf558edbc1ca272e274b9fe27222d7360 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/socket.h>
 #include <asm/socket.h>
+#include <linux/mm.h>
 
 #define NPROTO         AF_MAX
 
@@ -293,5 +294,44 @@ extern int kernel_sock_shutdown(struct socket *sock,
 extern struct ratelimit_state net_ratelimit_state;
 #endif
 
+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
+/* Support for notification on zero-copy TCP transfer completion */
+typedef void (*net_get_page_callback_t)(struct page *page);
+typedef void (*net_put_page_callback_t)(struct page *page);
+
+extern net_get_page_callback_t net_get_page_callback;
+extern net_put_page_callback_t net_put_page_callback;
+
+extern int net_set_get_put_page_callbacks(
+       net_get_page_callback_t get_callback,
+       net_put_page_callback_t put_callback);
+
+/*
+ * See the comment for net_set_get_put_page_callbacks() for why these
+ * functions don't need any protection.
+ */
+static inline void net_get_page(struct page *page)
+{
+       if (page->net_priv != 0)
+               net_get_page_callback(page);
+       get_page(page);
+}
+static inline void net_put_page(struct page *page)
+{
+       if (page->net_priv != 0)
+               net_put_page_callback(page);
+       put_page(page);
+}
+#else
+static inline void net_get_page(struct page *page)
+{
+       get_page(page);
+}
+static inline void net_put_page(struct page *page)
+{
+       put_page(page);
+}
+#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NET_H */
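
A rough sketch of how a consumer such as iSCSI-SCST might plug into the hooks declared above: register the two callbacks once, then tag the pages it hands to the TCP stack via page->net_priv. The per-transfer context, the completion, and the tagging scheme are assumptions for illustration, not taken from this patch.

#include <linux/net.h>
#include <linux/mm_types.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <asm/atomic.h>

/* Hypothetical per-transfer context, pointed to by page->net_priv. */
struct example_xfer {
        atomic_t net_refs;              /* references held by the TCP stack */
        struct completion net_done;     /* signalled when the stack lets go */
};

static void example_net_get_page(struct page *page)
{
        struct example_xfer *x = page->net_priv;

        atomic_inc(&x->net_refs);
}

static void example_net_put_page(struct page *page)
{
        struct example_xfer *x = page->net_priv;

        /* Last network reference dropped: the data buffer may be reused. */
        if (atomic_dec_and_test(&x->net_refs))
                complete(&x->net_done);
}

static int __init example_register(void)
{
        /*
         * Before calling sendpage() the driver would set page->net_priv = x
         * and take one initial reference per page given to the stack.
         */
        return net_set_get_put_page_callbacks(example_net_get_page,
                                              example_net_put_page);
}
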
index 9aaf5bfdad1afcb5e85b39482ea0458a0ea6b83c..01779961c40c979e8541a48c098650f03f1521fd 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/scatterlist.h>
+#include <asm/kmap_types.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <asm/io.h>
@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen);
 
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+           int nents_to_copy, size_t copy_len,
+           enum km_type d_km_type, enum km_type s_km_type);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
index 4ceb05d772aed12d392d618358284ea71cb51dd2..caf58c49d88046c11e46848a4d1c6c1ea4e422fc 100644 (file)
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
        return sg_copy_buffer(sgl, nents, buf, buflen, 1);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/*
+ * May advance to the next dst_sg element, so to copy into strictly one
+ * dst_sg element, it must either be the last one in the chain, or
+ * copy_len must equal dst_sg->length.
+ */
+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
+                       size_t *pdst_offs, struct scatterlist *src_sg,
+                       size_t copy_len,
+                       enum km_type d_km_type, enum km_type s_km_type)
+{
+       int res = 0;
+       struct scatterlist *dst_sg;
+       size_t src_len, dst_len, src_offs, dst_offs;
+       struct page *src_page, *dst_page;
+
+       dst_sg = *pdst_sg;
+       dst_len = *pdst_len;
+       dst_offs = *pdst_offs;
+       dst_page = sg_page(dst_sg);
+
+       src_page = sg_page(src_sg);
+       src_len = src_sg->length;
+       src_offs = src_sg->offset;
+
+       do {
+               void *saddr, *daddr;
+               size_t n;
+
+               saddr = kmap_atomic(src_page +
+                                        (src_offs >> PAGE_SHIFT), s_km_type) +
+                                   (src_offs & ~PAGE_MASK);
+               daddr = kmap_atomic(dst_page +
+                                       (dst_offs >> PAGE_SHIFT), d_km_type) +
+                                   (dst_offs & ~PAGE_MASK);
+
+               if (((src_offs & ~PAGE_MASK) == 0) &&
+                   ((dst_offs & ~PAGE_MASK) == 0) &&
+                   (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
+                   (copy_len >= PAGE_SIZE)) {
+                       copy_page(daddr, saddr);
+                       n = PAGE_SIZE;
+               } else {
+                       n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
+                                         PAGE_SIZE - (src_offs & ~PAGE_MASK));
+                       n = min(n, src_len);
+                       n = min(n, dst_len);
+                       n = min_t(size_t, n, copy_len);
+                       memcpy(daddr, saddr, n);
+               }
+               dst_offs += n;
+               src_offs += n;
+
+               kunmap_atomic(saddr, s_km_type);
+               kunmap_atomic(daddr, d_km_type);
+
+               res += n;
+               copy_len -= n;
+               if (copy_len == 0)
+                       goto out;
+
+               src_len -= n;
+               dst_len -= n;
+               if (dst_len == 0) {
+                       dst_sg = sg_next(dst_sg);
+                       if (dst_sg == NULL)
+                               goto out;
+                       dst_page = sg_page(dst_sg);
+                       dst_len = dst_sg->length;
+                       dst_offs = dst_sg->offset;
+               }
+       } while (src_len > 0);
+
+out:
+       *pdst_sg = dst_sg;
+       *pdst_len = dst_len;
+       *pdst_offs = dst_offs;
+       return res;
+}
+
+/**
+ * sg_copy - copy one SG vector to another
+ * @dst_sg:    destination SG
+ * @src_sg:    source SG
+ * @nents_to_copy: maximum number of entries to copy
+ * @copy_len:  maximum amount of data to copy. If 0, then copy all.
+ * @d_km_type: kmap_atomic type for the destination SG
+ * @s_km_type: kmap_atomic type for the source SG
+ *
+ * Description:
+ *    Data from the source SG vector will be copied to the destination SG
+ *    vector. End of the vectors will be determined by sg_next() returning
+ *    NULL. Returns number of bytes copied.
+ */
+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
+           int nents_to_copy, size_t copy_len,
+           enum km_type d_km_type, enum km_type s_km_type)
+{
+       int res = 0;
+       size_t dst_len, dst_offs;
+
+       if (copy_len == 0)
+               copy_len = 0x7FFFFFFF; /* copy all */
+
+       if (nents_to_copy == 0)
+               nents_to_copy = 0x7FFFFFFF; /* copy all */
+
+       dst_len = dst_sg->length;
+       dst_offs = dst_sg->offset;
+
+       do {
+               int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
+                               src_sg, copy_len, d_km_type, s_km_type);
+               copy_len -= copied;
+               res += copied;
+               if ((copy_len == 0) || (dst_sg == NULL))
+                       goto out;
+
+               nents_to_copy--;
+               if (nents_to_copy == 0)
+                       goto out;
+
+               src_sg = sg_next(src_sg);
+       } while (src_sg != NULL);
+
+out:
+       return res;
+}
+EXPORT_SYMBOL(sg_copy);
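
A small usage sketch of the new sg_copy() helper (illustrative only, not from the patch): copy one two-page scatterlist into another, passing 0 for both limits, which the kernel-doc above defines as "copy all". Cleanup of the allocated pages is omitted.

#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_sg_copy(void)
{
        struct scatterlist src[2], dst[2];
        int i, copied;

        sg_init_table(src, 2);
        sg_init_table(dst, 2);
        for (i = 0; i < 2; i++) {
                struct page *sp = alloc_page(GFP_KERNEL);
                struct page *dp = alloc_page(GFP_KERNEL);

                if (sp == NULL || dp == NULL)
                        return -ENOMEM; /* a real caller would free the pages */
                sg_set_page(&src[i], sp, PAGE_SIZE, 0);
                sg_set_page(&dst[i], dp, PAGE_SIZE, 0);
        }

        /* nents_to_copy == 0 and copy_len == 0 both mean "no limit". */
        copied = sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);

        return (copied == 2 * PAGE_SIZE) ? 0 : -EIO;
}
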
index 55fd82e9ffd91e9fd48878147f3068923373ce16..3bc798bbadbadd2a550f6707dae025539eab92d1 100644 (file)
@@ -72,6 +72,18 @@ config INET
 
          Short answer: say Y.
 
+config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION
+       bool "TCP/IP zero-copy transfer completion notification"
+        depends on INET
+        default SCST_ISCSI
+       ---help---
+         Adds support for sending a notification upon completion of a
+          zero-copy TCP/IP transfer. This can speed up certain TCP/IP
+          software. Currently this is only used by the iSCSI target driver
+          iSCSI-SCST.
+
+          If unsure, say N.
+
 if INET
 source "net/ipv4/Kconfig"
 source "net/ipv6/Kconfig"
index 0dd54a69dace255fcdf54732d982e8c521c574a5..23ea0f18a5af6fb80ae9f039c31f7f68a50b218f 100644 (file)
@@ -3225,7 +3225,7 @@ pull:
                skb_shinfo(skb)->frags[0].size -= grow;
 
                if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
-                       put_page(skb_shinfo(skb)->frags[0].page);
+                       net_put_page(skb_shinfo(skb)->frags[0].page);
                        memmove(skb_shinfo(skb)->frags,
                                skb_shinfo(skb)->frags + 1,
                                --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
index 104f8444754aa160fe7922458980ddb57485a2a5..c2b64fbee58ec1e154c57bc677f0e493a1417616 100644 (file)
@@ -76,13 +76,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
 {
-       put_page(buf->page);
+       net_put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       get_page(buf->page);
+       net_get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -324,7 +324,7 @@ static void skb_release_data(struct sk_buff *skb)
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-                               put_page(skb_shinfo(skb)->frags[i].page);
+                               net_put_page(skb_shinfo(skb)->frags[i].page);
                }
 
                if (skb_has_frag_list(skb))
@@ -730,7 +730,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-                       get_page(skb_shinfo(n)->frags[i].page);
+                       net_get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }
@@ -806,7 +806,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                kfree(skb->head);
        } else {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-                       get_page(skb_shinfo(skb)->frags[i].page);
+                       net_get_page(skb_shinfo(skb)->frags[i].page);
 
                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);
@@ -1083,7 +1083,7 @@ drop_pages:
                skb_shinfo(skb)->nr_frags = i;
 
                for (; i < nfrags; i++)
-                       put_page(skb_shinfo(skb)->frags[i].page);
+                       net_put_page(skb_shinfo(skb)->frags[i].page);
 
                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);
@@ -1252,7 +1252,7 @@ pull_pages:
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
-                       put_page(skb_shinfo(skb)->frags[i].page);
+                       net_put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -1353,7 +1353,7 @@ EXPORT_SYMBOL(skb_copy_bits);
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-       put_page(spd->pages[i]);
+       net_put_page(spd->pages[i]);
 }
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
@@ -1377,7 +1377,7 @@ new_page:
                off = sk->sk_sndmsg_off;
                mlen = PAGE_SIZE - off;
                if (mlen < 64 && mlen < *len) {
-                       put_page(p);
+                       net_put_page(p);
                        goto new_page;
                }
 
@@ -1387,7 +1387,7 @@ new_page:
        memcpy(page_address(p) + off, page_address(page) + *offset, *len);
        sk->sk_sndmsg_off += *len;
        *offset = off;
-       get_page(p);
+       net_get_page(p);
 
        return p;
 }
@@ -1409,7 +1409,7 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd,
                if (!page)
                        return 1;
        } else
-               get_page(page);
+               net_get_page(page);
 
        spd->pages[spd->nr_pages] = page;
        spd->partial[spd->nr_pages].len = *len;
@@ -2042,7 +2042,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
                                 *    where splitting is expensive.
                                 * 2. Split is accurately. We make this.
                                 */
-                               get_page(skb_shinfo(skb)->frags[i].page);
+                               net_get_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
                                skb_shinfo(skb1)->frags[0].size -= len - pos;
                                skb_shinfo(skb)->frags[i].size  = len - pos;
@@ -2164,7 +2164,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
                        to++;
 
                } else {
-                       get_page(fragfrom->page);
+                       net_get_page(fragfrom->page);
                        fragto->page = fragfrom->page;
                        fragto->page_offset = fragfrom->page_offset;
                        fragto->size = todo;
@@ -2186,7 +2186,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
                fragto = &skb_shinfo(tgt)->frags[merge];
 
                fragto->size += fragfrom->size;
-               put_page(fragfrom->page);
+               net_put_page(fragfrom->page);
        }
 
        /* Reposition in the original skb */
@@ -2587,7 +2587,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 
                while (pos < offset + len && i < nfrags) {
                        *frag = skb_shinfo(skb)->frags[i];
-                       get_page(frag->page);
+                       net_get_page(frag->page);
                        size = frag->size;
 
                        if (pos < offset) {
index 4978d22f9a75eafe3fe10d8cf93f6e8a2a7f647a..dee3df7c97ef34f0748d349a8ec4fe5bd78b2454 100644 (file)
@@ -50,6 +50,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
+obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
                      xfrm4_output.o
index 439d2a34ee4411b932eefb3a6fc51383e8db7125..07965860fa92b39bb1cec60362dd1bbb74694d4e 100644 (file)
@@ -1042,7 +1042,7 @@ alloc_new_skb:
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
-                                       get_page(page);
+                                       net_get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
@@ -1201,7 +1201,7 @@ ssize_t   ip_append_page(struct sock *sk, struct page *page,
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
-                       get_page(page);
+                       net_get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
index f15c36a706ecb07385d46a7d8eeb1233cfbb2daa..053d21ca2cd9ddc47661fdf62b2da20b60181331 100644 (file)
@@ -806,7 +806,7 @@ new_segment:
                if (can_coalesce) {
                        skb_shinfo(skb)->frags[i - 1].size += copy;
                } else {
-                       get_page(page);
+                       net_get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }
 
@@ -1015,7 +1015,7 @@ new_segment:
                                        goto new_segment;
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
-                                               put_page(page);
+                                               net_put_page(page);
                                                TCP_PAGE(sk) = page = NULL;
                                                off = 0;
                                        }
@@ -1056,9 +1056,9 @@ new_segment:
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
                                        if (TCP_PAGE(sk)) {
-                                               get_page(page);
+                                               net_get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
-                                               get_page(page);
+                                               net_get_page(page);
                                                TCP_PAGE(sk) = page;
                                        }
                                }
index 61c2463e2753f809c12f31967e7fa189e41e1f54..143811e7862d073a13dfce0753fc3c09b7742769 100644 (file)
@@ -1082,7 +1082,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
-                       put_page(skb_shinfo(skb)->frags[i].page);
+                       net_put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
index 94b5bf132b2e33a467f662b8c0e402a459f00a3d..9b579635b734fc8c19fbbefc717b70b9c3e4ba4e 100644 (file)
@@ -1383,7 +1383,7 @@ alloc_new_skb:
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
-                                       get_page(page);
+                                       net_get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }