}
EXPORT_SYMBOL_GPL(blk_queue_flush);
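+/**
+ * blk_queue_flush_queueable - initialize queue flush queueability flag
+ * @q:	the request queue for the device
+ * @queueable:	flag whether a flush is queueable
+ */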
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+ q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
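A minimal usage sketch, not part of this patch: a low-level driver whose device cannot service a flush concurrently with other requests would call the new setter during queue initialization (the function name below is hypothetical).

#include <linux/blkdev.h>

/* Hypothetical probe-time setup: the device supports FLUSH and FUA but
 * cannot service a flush while other requests are queued, so mark the
 * flush as non-queueable. */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	blk_queue_flush_queueable(q, false);
}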
* for flush operations
*/
unsigned int flush_flags;
+ unsigned int flush_not_queueable:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
unsigned long flush_pending_since;
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
return bdev->bd_block_size;
}
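+/* True unless the driver marked this queue's flush as non-queueable. */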
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+ return !q->flush_not_queueable;
+}
+
typedef struct {struct page *v;} Sector;
unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
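As a usage sketch (not introduced by this patch), dispatch-side code can combine the new helper with the existing flush indices to decide whether to hold back other requests while a flush is in flight; the helper below is illustrative only.

#include <linux/blkdev.h>

/* Illustrative only: a flush is in progress when the pending and
 * running indices differ, and on a device whose flush is not
 * queueable other requests should wait for it to finish. */
static inline bool example_hold_for_flush(struct request_queue *q)
{
	return !queue_flush_queueable(q) &&
	       q->flush_pending_idx != q->flush_running_idx;
}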