path: root/include/xen
author     Jens Axboe <axboe@fb.com>    2014-02-10 12:52:34 -0700
committer  Jens Axboe <axboe@fb.com>    2014-02-10 12:52:34 -0700
commit     9d4cb8e3a5b448d802155688bf2d50ac1b9e1a51 (patch)
tree       95f3ee193b9f6a248001cee13e1f4ea23704ca45 /include/xen
parent     11c94444074f40b479a05f6657d935204e992f2e (diff)
parent     3661371701e714f0cea4120f6a365340858fb4e4 (diff)
Merge branch 'stable/for-jens-3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip into for-linus
Konrad writes:

Please git pull the following branch:

git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git stable/for-jens-3.14

which is based off v3.13-rc6. If you would like me to rebase it on a different branch/tag I would be more than happy to do so.

The patches are all bug-fixes and hopefully can go in 3.14. They deal with xen-blkback shutdown issues that cause memory leaks as well as shutdown races. They should go to the stable tree, and if you are OK with it I will ask for those fixes to be backported.

There is also a fix to xen-blkfront to deal with an unexpected state transition. And lastly a fix to the header, which was using __aligned__ unnecessarily.
Diffstat (limited to 'include/xen')
-rw-r--r--  include/xen/interface/io/blkif.h | 34
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
-struct blkif_request_segment_aligned {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
#endif
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_request_discard {
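
For illustration, below is a minimal user-space sketch of the page-count calculation described in the updated comment, assuming a 4 KiB PAGE_SIZE and a 32-bit grant_ref_t; the macro and helper names are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration; in the kernel these come from the arch page
 * definitions and the Xen grant-table interface headers. */
#define PAGE_SIZE 4096
typedef uint32_t grant_ref_t;

/* Segment layout after this patch: no explicit _pad field and no
 * __packed__, so the compiler pads the 6 bytes of members up to 8. */
struct blkif_request_segment {
	grant_ref_t gref;              /* reference to I/O buffer frame */
	uint8_t first_sect, last_sect; /* first/last sector in frame (inclusive) */
};

/* Segments that fit in one indirect page. */
#define SEGS_PER_INDIRECT_PAGE (PAGE_SIZE / sizeof(struct blkif_request_segment))

/* ceil(indirect_segments / SEGS_PER_INDIRECT_PAGE), as the comment in
 * blkif.h describes. Hypothetical helper, not part of the patch. */
static unsigned long indirect_pages_needed(unsigned long indirect_segments)
{
	return (indirect_segments + SEGS_PER_INDIRECT_PAGE - 1) /
	       SEGS_PER_INDIRECT_PAGE;
}

int main(void)
{
	/* With a 4 KiB page and an 8-byte segment, one indirect page holds
	 * 512 segments, so e.g. 1024 segments need 2 indirect pages. */
	printf("segments per indirect page: %zu\n", SEGS_PER_INDIRECT_PAGE);
	printf("pages for 1024 segments:    %lu\n", indirect_pages_needed(1024));
	return 0;
}

On common ABIs the compiler's trailing padding brings struct blkif_request_segment to the same 8 bytes that the old _pad field provided explicitly, which is why the __packed__/_pad variant could be dropped without changing the ring layout.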