 	queue_for_each_hw_ctx(q, hctx, i) {
 		hctx_for_each_ctx(hctx, ctx, j) {
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
 			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
 			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
 				continue;
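The two blk_stat_flush_batch() calls added here are the crux of the fix. Completions stage their samples in a per-stat batch buffer so the hot path stays cheap, and those samples only show up in nr_samples once the batch is folded in. A reader that checks nr_samples without flushing first treats a context whose samples are all still batched as empty and skips it. The sketch below models the scheme; the names are illustrative, and the real struct blk_rq_stat carries more state (min/max, the sample window time), so this is a simplified analogue rather than the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Simplified analogue of the batched-stat idea; illustrative names only. */
struct simple_stat {
	uint64_t sum;		/* folded-in total, visible to readers */
	uint32_t nr_samples;	/* sample count visible to readers */
	uint64_t batch;		/* staged samples, not yet folded in */
	uint32_t nr_batch;
};

/* Completion path: stage the sample, touching only the batch fields. */
static void stat_add(struct simple_stat *s, uint64_t value)
{
	s->batch += value;
	s->nr_batch++;
}

/* Fold staged samples into the reader-visible fields. */
static void stat_flush_batch(struct simple_stat *s)
{
	if (!s->nr_batch)
		return;
	s->sum += s->batch;
	s->nr_samples += s->nr_batch;
	s->batch = 0;
	s->nr_batch = 0;
}

/* Reader: flush first, as in the hunk above; otherwise batched-only
 * samples leave nr_samples at zero and the stats look empty. */
static int stat_read_mean(struct simple_stat *s, uint64_t *mean)
{
	stat_flush_batch(s);
	if (!s->nr_samples)
		return -1;	/* genuinely no samples */
	*mean = s->sum / s->nr_samples;
	return 0;
}

Batching trades a small read-side obligation for fewer shared-state updates on the completion path; the invariant it imposes is simply that every reader flushes before it looks.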
 	if (q->mq_ops)
 		blk_mq_stat_get(q, dst);
 	else {
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
 		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
 				sizeof(struct blk_rq_stat));
 		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
 				sizeof(struct blk_rq_stat));
 	}
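Note the asymmetry between the branches. On the mq path, blk_mq_stat_get() now does per-software-queue flushing itself (the hunk above), so nothing extra is needed before calling it. The legacy branch copies q->rq_stats out directly, so without the two added flushes any still-batched samples would be absent from the snapshot the memcpy() takes.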
 		uint64_t newest = 0;

 		hctx_for_each_ctx(hctx, ctx, i) {
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
 			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
 			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
 				continue;
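This last hunk applies the same rule to the per-hctx reader: flush every software queue's batch before the nr_samples test decides whether to skip it. To make the failure mode concrete, here is a small driver for the simple_stat sketch above (it continues that same illustrative file and is likewise not kernel code):

int main(void)
{
	struct simple_stat s = { 0 };
	uint64_t mean;

	/* Three completions land in the batch; nothing is folded in yet. */
	stat_add(&s, 100);
	stat_add(&s, 200);
	stat_add(&s, 300);

	/* Without a flush, nr_samples still reads 0 here, which is exactly
	 * the state the unpatched checks above mistake for "no data". */
	printf("before flush: nr_samples=%u\n", (unsigned)s.nr_samples);

	if (stat_read_mean(&s, &mean) == 0)
		printf("after flush: mean=%llu over %u samples\n",
		       (unsigned long long)mean, (unsigned)s.nr_samples);
	return 0;
}

This prints nr_samples=0 before the flush, then a mean of 200 over 3 samples once stat_read_mean() has folded the batch in.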