Revert "Merge branch 'stable/for-jens-3.2' into linux-next"
author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tue, 11 Oct 2011 19:05:27 +0000 (15:05 -0400)
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tue, 11 Oct 2011 19:05:27 +0000 (15:05 -0400)
This reverts commit b0b69124182bed93c9ee46726faeb92cca44e461, reversing
changes made to e54339ce6a3d094789ad74a78548799f2fddf521.

drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
include/xen/interface/io/blkif.h

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 7a406007710a0fbaf51c5b4103d5aa16067182bf..a5e7734348e0f7c2fd0626dbf5e484f806a296d4 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -422,16 +422,13 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
 
-       if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
-               unsigned long secure = (blkif->vbd.discard_secure &&
-                       (req->u.discard.flag & BLKIF_OP_DISCARD_FLAG_SECURE)) ?
-                       BLKDEV_DISCARD_SECURE : 0;
+       if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
                /* just forward the discard request */
                err = blkdev_issue_discard(bdev,
                                req->u.discard.sector_number,
                                req->u.discard.nr_sectors,
-                               GFP_KERNEL, secure);
-       else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+                               GFP_KERNEL, 0);
+       else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
                /* punch a hole in the backing file */
                struct loop_device *lo = bdev->bd_disk->private_data;
                struct file *file = lo->lo_backing_file;
@@ -455,23 +452,6 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
        make_response(blkif, req->id, req->operation, status);
 }
 
-static void xen_blk_drain_io(struct xen_blkif *blkif)
-{
-       atomic_set(&blkif->drain, 1);
-       do {
-               wait_for_completion_interruptible_timeout(
-                               &blkif->drain_complete, HZ);
-
-               if (!atomic_read(&blkif->drain))
-                       break;
-               /* The initial value is one, and one refcnt taken at the
-                * start of the xen_blkif_schedule thread. */
-               if (atomic_read(&blkif->refcnt) <= 2)
-                       break;
-       } while (!kthread_should_stop());
-       atomic_set(&blkif->drain, 0);
-}
-
 /*
  * Completion callback on the bio's. Called as bh->b_end_io()
  */
@@ -484,11 +464,6 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-       } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-                   (error == -EOPNOTSUPP)) {
-               pr_debug(DRV_PFX "write barrier op failed, not supported\n");
-               xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
-               pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
@@ -506,10 +481,6 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                free_req(pending_req);
-               if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-                       if (atomic_read(&pending_req->blkif->drain))
-                               complete(&pending_req->blkif->drain_complete);
-               }
        }
 }
 
@@ -603,6 +574,7 @@ do_block_io_op(struct xen_blkif *blkif)
 
        return more_to_do;
 }
+
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -619,7 +591,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
-       bool drain = false;
 
        switch (req->operation) {
        case BLKIF_OP_READ:
@@ -630,8 +601,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
-       case BLKIF_OP_WRITE_BARRIER:
-               drain = true;
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
@@ -640,6 +609,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                blkif->st_ds_req++;
                operation = REQ_DISCARD;
                break;
+       case BLKIF_OP_WRITE_BARRIER:
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
@@ -698,19 +668,13 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                }
        }
 
-       /* Wait on all outstanding I/O's and once that has been completed
-        * issue the WRITE_FLUSH.
-        */
-       if (drain)
-               xen_blk_drain_io(pending_req->blkif);
-
        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
-       if (operation != REQ_DISCARD &&
+       if (operation != BLKIF_OP_DISCARD &&
                        xen_blkbk_map(req, pending_req, seg))
                goto fail_flush;
 
@@ -779,7 +743,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE_ODIRECT || operation == WRITE_FLUSH)
+       else if (operation == WRITE || operation == WRITE_FLUSH)
                blkif->st_wr_sect += preq.nr_sects;
 
        return 0;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 43b72a78a326d5208af33c76dfe003f444ea9028..1b1bc44586853ece68494ecf0b823b6b6d70d1e1 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -72,7 +72,6 @@ struct blkif_x86_32_request_rw {
 struct blkif_x86_32_request_discard {
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t nr_sectors;
-       uint32_t flag;
 };
 
 struct blkif_x86_32_request {
@@ -102,7 +101,6 @@ struct blkif_x86_64_request_rw {
 struct blkif_x86_64_request_discard {
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t nr_sectors;
-       uint32_t flag;
 };
 
 struct blkif_x86_64_request {
@@ -159,7 +157,6 @@ struct xen_vbd {
        /* Cached size parameter. */
        sector_t                size;
        bool                    flush_support;
-       bool                    discard_secure;
 };
 
 struct backend_info;
@@ -184,9 +181,6 @@ struct xen_blkif {
        atomic_t                refcnt;
 
        wait_queue_head_t       wq;
-       /* for barrier (drain) requests */
-       struct completion       drain_complete;
-       atomic_t                drain;
        /* One thread per one blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;
@@ -235,8 +229,6 @@ int xen_blkif_schedule(void *arg);
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);
 
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-                     struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
@@ -262,7 +254,6 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
        case BLKIF_OP_DISCARD:
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-               dst->u.discard.flag = src->u.discard.flag;
                break;
        default:
                break;
@@ -292,7 +283,6 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
        case BLKIF_OP_DISCARD:
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-               dst->u.discard.flag = src->u.discard.flag;
                break;
        default:
                break;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index fa8cd40b064ef9a373de33ee67a9974b34f56e06..3809f93478e81cfe567ef2f3866a76dea6f913b7 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -114,8 +114,6 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
        spin_lock_init(&blkif->blk_ring_lock);
        atomic_set(&blkif->refcnt, 1);
        init_waitqueue_head(&blkif->wq);
-       init_completion(&blkif->drain_complete);
-       atomic_set(&blkif->drain, 0);
        blkif->st_print = jiffies;
        init_waitqueue_head(&blkif->waiting_to_free);
 
@@ -378,9 +376,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
        if (q && q->flush_flags)
                vbd->flush_support = true;
 
-       if (q && blk_queue_secdiscard(q))
-               vbd->discard_secure = true;
-
        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
@@ -463,15 +458,6 @@ int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
                                state = 1;
                                blkif->blk_backend_type = BLKIF_BACKEND_PHY;
                        }
-                       /* Optional. */
-                       err = xenbus_printf(xbt, dev->nodename,
-                               "discard-secure", "%d",
-                               blkif->vbd.discard_secure);
-                       if (err) {
-                               xenbus_dev_fatal(dev, err,
-                                       "writting discard-secure");
-                               goto kfree;
-                       }
                }
        } else {
                err = PTR_ERR(type);
@@ -488,19 +474,6 @@ kfree:
 out:
        return err;
 }
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-                     struct backend_info *be, int state)
-{
-       struct xenbus_device *dev = be->dev;
-       int err;
-
-       err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
-                           "%d", state);
-       if (err)
-               xenbus_dev_fatal(dev, err, "writing feature-barrier");
-
-       return err;
-}
 
 /*
  * Entry point to this code when a new device is created.  Allocate the basic
@@ -735,9 +708,6 @@ again:
 
        err = xen_blkbk_discard(xbt, be);
 
-       /* If we can't advertise it is OK. */
-       err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
-
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(&be->blkif->vbd));
        if (err) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 807b7b6d6fa38be26ebe0ae47be80c0dc9d7f31f..e9d301c564577c4e5a1105ca037234186c52ccdd 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,8 +98,7 @@ struct blkfront_info
        unsigned long shadow_free;
        unsigned int feature_flush;
        unsigned int flush_op;
-       unsigned int feature_discard:1;
-       unsigned int feature_secdiscard:1;
+       unsigned int feature_discard;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        int is_ready;
@@ -306,13 +305,11 @@ static int blkif_queue_request(struct request *req)
                ring_req->operation = info->flush_op;
        }
 
-       if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
+       if (unlikely(req->cmd_flags & REQ_DISCARD)) {
                /* id, sector_number and handle are set above. */
                ring_req->operation = BLKIF_OP_DISCARD;
                ring_req->nr_segments = 0;
                ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
-               if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
-                       ring_req->u.discard.flag = BLKIF_OP_DISCARD_FLAG_SECURE;
        } else {
                ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
                BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -427,8 +424,6 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity;
                rq->limits.discard_alignment = info->discard_alignment;
-               if (info->feature_secdiscard)
-                       queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
        }
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -754,9 +749,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                           info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
-                               info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-                               queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
                        __blk_end_request_all(req, error);
                        break;
@@ -1142,13 +1135,11 @@ static void blkfront_setup_discard(struct blkfront_info *info)
        char *type;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
-       unsigned int discard_secure;
 
        type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
        if (IS_ERR(type))
                return;
 
-       info->feature_secdiscard = 0;
        if (strncmp(type, "phy", 3) == 0) {
                err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "discard-granularity", "%u", &discard_granularity,
@@ -1159,12 +1150,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
                        info->discard_granularity = discard_granularity;
                        info->discard_alignment = discard_alignment;
                }
-               err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "discard-secure", "%d", &discard_secure,
-                           NULL);
-               if (!err)
-                       info->feature_secdiscard = discard_secure;
-
        } else if (strncmp(type, "file", 4) == 0)
                info->feature_discard = 1;
 
@@ -1465,8 +1450,6 @@ static struct xenbus_driver blkfront = {
 
 static int __init xlblk_init(void)
 {
-       int ret;
-
        if (!xen_domain())
                return -ENODEV;
 
@@ -1476,13 +1459,7 @@ static int __init xlblk_init(void)
                return -ENODEV;
        }
 
-       ret = xenbus_register_frontend(&blkfront);
-       if (ret) {
-               unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
-               return ret;
-       }
-
-       return 0;
+       return xenbus_register_frontend(&blkfront);
 }
 module_init(xlblk_init);
 
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 13d040e0537c8f7755f9bcafe0aa5db5e53979dc..9324488f23f0b2eaab58e50f7cdda0b6d4b152ef 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -84,10 +84,6 @@ typedef uint64_t blkif_sector_t;
  *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
  * http://www.seagate.com/staticfiles/support/disc/manuals/
  *     Interface%20manuals/100293068c.pdf
- * We also provide three extra XenBus options to the discard operation:
- * 'discard-granularity' - Max amount of sectors that can be discarded.
- * 'discard-alignment' - 4K, 128K, etc aligment on sectors to erased.
- * 'discard-secure' - whether the discard can also securely erase data.
  */
 #define BLKIF_OP_DISCARD           5
 
@@ -111,8 +107,6 @@ struct blkif_request_rw {
 struct blkif_request_discard {
        blkif_sector_t sector_number;
        uint64_t nr_sectors;
-#define BLKIF_OP_DISCARD_FLAG_SECURE   (1<<1) /* ignored if discard-secure=0 */
-       uint32_t flag;
 };
 
 struct blkif_request {