xen-block: implement indirect descriptors
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91ca390fff537f18f524300bf470246..65e12099ef89d9f3b5706858d956d8aabe427348 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -102,6 +102,30 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_OP_DISCARD           5
 
+/*
+ * Recognized if "feature-max-indirect-segments" in present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * This pages are filled with an array of blkif_request_segment_aligned
+ * that hold the information about the segments. The number of indirect
+ * pages to use is determined by the maximum number of segments
+ * a indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
 /*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
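
The ceil(indirect_segments / 512) rule described in the comment above reduces
to plain integer arithmetic. A minimal standalone sketch, assuming a 4 KiB
page and the 8-byte segment entry introduced by this patch (the helper name
is illustrative, not part of the patch):

#include <stdio.h>

#define PAGE_SIZE              4096u
#define SEG_ENTRY_SIZE         8u   /* sizeof(struct blkif_request_segment_aligned) */
#define SEGS_PER_INDIRECT_PAGE (PAGE_SIZE / SEG_ENTRY_SIZE)   /* 512 */

/* ceil(segs / 512) in integer arithmetic: round up before dividing */
static unsigned int indirect_pages_needed(unsigned int segs)
{
	return (segs + SEGS_PER_INDIRECT_PAGE - 1) / SEGS_PER_INDIRECT_PAGE;
}

int main(void)
{
	printf("%u\n", indirect_pages_needed(512));   /* 1: exactly one full page */
	printf("%u\n", indirect_pages_needed(513));   /* 2: one segment spills over */
	return 0;
}
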
@@ -109,6 +133,16 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+       grant_ref_t gref;        /* reference to I/O buffer frame        */
+       /* @first_sect: first sector in frame to transfer (inclusive).   */
+       /* @last_sect: last sector in frame to transfer (inclusive).     */
+       uint8_t     first_sect, last_sect;
+       uint16_t    _pad; /* padding to round the entry up to 8 bytes, so 512 fit in a page */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
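
The layout claims above (8-byte entries, 512 per 4 KiB page) can be verified
at compile time. A standalone sketch; grant_ref_t is stubbed with uint32_t
here because the real typedef lives in the Xen grant-table headers:

#include <stdint.h>

typedef uint32_t grant_ref_t;   /* stand-in for the Xen typedef */

struct blkif_request_segment_aligned {
	grant_ref_t gref;       /* 4 bytes */
	uint8_t     first_sect; /* 1 byte  */
	uint8_t     last_sect;  /* 1 byte  */
	uint16_t    _pad;       /* 2 bytes of padding */
} __attribute__((__packed__));

/* 4 + 1 + 1 + 2 == 8, so a 4 KiB page holds exactly 512 entries */
_Static_assert(sizeof(struct blkif_request_segment_aligned) == 8,
	       "indirect segment entry must be 8 bytes");
_Static_assert(4096 / sizeof(struct blkif_request_segment_aligned) == 512,
	       "one 4 KiB indirect page holds 512 segments");
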
@@ -147,12 +181,31 @@ struct blkif_request_other {
        uint64_t     id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+       uint8_t        indirect_op;
+       uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+       uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+       uint64_t       id;
+       blkif_sector_t sector_number;
+       blkif_vdev_t   handle;
+       uint16_t       _pad2;        /* make it 64 byte aligned */
+       grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+       uint32_t      _pad3;         /* make it 64 byte aligned */
+#else
+       uint64_t      _pad3;         /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_request_rw rw;
                struct blkif_request_discard discard;
                struct blkif_request_other other;
+               struct blkif_request_indirect indirect;
        } u;
 } __attribute__((__packed__));
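
For reference, this is roughly how a frontend would populate the new union
member: the containing blkif_request's operation field is set to
BLKIF_OP_INDIRECT, while the real I/O operation is carried in indirect_op.
A self-contained sketch only; the struct is reproduced without the
arch-dependent padding fields, the typedefs are stubbed, and
fill_indirect_read() is an illustrative helper, not something from this patch:

#include <stdint.h>

/* stand-ins for the Xen typedefs, to keep the sketch standalone */
typedef uint32_t grant_ref_t;
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

#define BLKIF_OP_READ                        0
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
#define SEGS_PER_INDIRECT_PAGE               512u

/* simplified: the CONFIG_X86_64 padding fields are omitted here */
struct blkif_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
} __attribute__((__packed__));

/*
 * Fill in an indirect read request.  grefs[] must hold one grant
 * reference per indirect page, i.e. ceil(nr_segments / 512) entries,
 * each granted page containing an array of blkif_request_segment_aligned.
 */
static void fill_indirect_read(struct blkif_request_indirect *ind,
			       uint64_t id, blkif_sector_t sector,
			       blkif_vdev_t handle, uint16_t nr_segments,
			       const grant_ref_t *grefs)
{
	unsigned int i;
	unsigned int pages = (nr_segments + SEGS_PER_INDIRECT_PAGE - 1) /
			     SEGS_PER_INDIRECT_PAGE;

	ind->indirect_op   = BLKIF_OP_READ;  /* the real I/O operation */
	ind->nr_segments   = nr_segments;    /* may exceed the usual 11 */
	ind->id            = id;
	ind->sector_number = sector;
	ind->handle        = handle;
	for (i = 0; i < pages; i++)
		ind->indirect_grefs[i] = grefs[i];
}
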