Introduce struct dm_io_client to prepare for per-client mempools and bio_sets.
Temporary functions bios() and io_pool() choose between the per-client
structures and the global ones so the old and new interfaces can co-exist.
Make error_bits optional.
Signed-off-by: Heinz Mauelshagen <hjm@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Milan Broz <mbroz@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
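For context, the per-client usage this prepares for looks roughly like the sketch below; dm_io_client_create()/dm_io_client_destroy() belong to the follow-up interface, so the names and the page-count argument are assumptions, not part of this patch:

    /*
     * Hypothetical caller once the per-client interface exists
     * (constructor/destructor names assumed from the follow-up patch).
     */
    struct dm_io_client *io_client;

    io_client = dm_io_client_create(num_pages); /* pool sized per client */
    if (IS_ERR(io_client))
            return PTR_ERR(io_client);
    /* ... issue io through io_client's mempool and bio_set ... */
    dm_io_client_destroy(io_client);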
/*
* Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2006 Red Hat GmbH
*
* This file is released under the GPL.
*/
static struct bio_set *_bios;
+struct dm_io_client {
+ mempool_t *pool;
+ struct bio_set *bios;
+};
+
/* FIXME: can we shrink this ? */
struct io {
unsigned long error;
atomic_t count;
struct task_struct *sleeper;
+ struct dm_io_client *client;
io_notify_fn callback;
void *context;
};
/*
* io contexts are only dynamically allocated for asynchronous
* io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as buffer heads ! (FIXME:
- * must reduce this).
+ * have the same number of io contexts as bios! (FIXME: must reduce this).
*/
static unsigned _num_ios;
static mempool_t *_io_pool;
+/*
+ * Temporary functions to allow old and new interfaces to co-exist.
+ */
+static struct bio_set *bios(struct dm_io_client *client)
+{
+ return client ? client->bios : _bios;
+}
+
+static mempool_t *io_pool(struct dm_io_client *client)
+{
+ return client ? client->pool : _io_pool;
+}
+
static unsigned int pages_to_ios(unsigned int pages)
{
return 4 * pages; /* too many ? */
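With a NULL client both helpers fall back to the module-global pool and bio_set, which is what keeps the old entry points working unchanged; a minimal illustration of the two paths:

    /* Old interface: no client yet, so the globals are used. */
    io = mempool_alloc(io_pool(NULL), GFP_NOIO);    /* == _io_pool */

    /* New interface: resources come from the client. */
    io = mempool_alloc(io_pool(client), GFP_NOIO);  /* == client->pool */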
io_notify_fn fn = io->callback;
void *context = io->context;
- mempool_free(io, _io_pool);
+ mempool_free(io, io_pool(io->client));
static void dm_bio_destructor(struct bio *bio)
{
+ struct io *io = bio->bi_private;
+
+ bio_free(bio, bios(io->client));
}
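Note the destructor recovers the struct io from bio->bi_private, so the allocation site must pair the two; the assignments below are implied by the destructor rather than shown in these hunks:

    /* Pairing assumed at bio allocation time (in do_region): */
    bio->bi_private = io;                   /* lets the destructor find io */
    bio->bi_destructor = dm_bio_destructor; /* frees into bios(io->client) */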
/*-----------------------------------------------------------------
* to hide it from bio_add_page().
*/
num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
+ bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
-static int sync_io(unsigned int num_regions, struct io_region *where,
- int rw, struct dpages *dp, unsigned long *error_bits)
+static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ struct io_region *where, int rw, struct dpages *dp,
+ unsigned long *error_bits)
io.error = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
io.sleeper = current;
+ io.client = client;
dispatch_io(rw, num_regions, where, dp, &io, 1);
if (atomic_read(&io.count))
return -EINTR;
- *error_bits = io.error;
+ if (error_bits)
+ *error_bits = io.error;
+
return io.error ? -EIO : 0;
}
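Because error_bits may now be NULL, a synchronous caller that only needs the overall result can skip the bitmap; a sketch using an existing wrapper (region and buffer setup elided):

    unsigned long err_bits;
    int r;

    /* Caller that wants the per-region error bitmap. */
    r = dm_io_sync_vm(1, &where, READ, data, &err_bits);

    /* Caller that only cares about overall success. */
    r = dm_io_sync_vm(1, &where, READ, data, NULL);
    if (r)
            return r;       /* -EIO on any region error */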
-static int async_io(unsigned int num_regions, struct io_region *where, int rw,
- struct dpages *dp, io_notify_fn fn, void *context)
+static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ struct io_region *where, int rw, struct dpages *dp,
+ io_notify_fn fn, void *context)
- io = mempool_alloc(_io_pool, GFP_NOIO);
+ io = mempool_alloc(io_pool(client), GFP_NOIO);
io->error = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
+ io->client = client;
io->callback = fn;
io->context = context;
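For reference, completion on the asynchronous path is delivered through io_notify_fn, which receives the accumulated error bits directly; a sketch with an illustrative callback and caller-owned context:

    /* Illustrative io_notify_fn; 'error' carries the region error bits. */
    static void my_io_notify(unsigned long error, void *context)
    {
            struct my_job *job = context;   /* hypothetical caller state */

            job->failed = !!error;
            complete(&job->done);
    }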
{
struct dpages dp;
list_dp_init(&dp, pl, offset);
- return sync_io(num_regions, where, rw, &dp, error_bits);
+ return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
{
struct dpages dp;
bvec_dp_init(&dp, bvec);
- return sync_io(num_regions, where, rw, &dp, error_bits);
+ return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
{
struct dpages dp;
vm_dp_init(&dp, data);
- return sync_io(num_regions, where, rw, &dp, error_bits);
+ return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
{
struct dpages dp;
list_dp_init(&dp, pl, offset);
- return async_io(num_regions, where, rw, &dp, fn, context);
+ return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
{
struct dpages dp;
bvec_dp_init(&dp, bvec);
- return async_io(num_regions, where, rw, &dp, fn, context);
+ return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
{
struct dpages dp;
vm_dp_init(&dp, data);
- return async_io(num_regions, where, rw, &dp, fn, context);
+ return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
EXPORT_SYMBOL(dm_io_get);