/*
* Go grab and pin some userspace pages. Typically we'll get 64 at a time.
*/
-static int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
+static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
int ret;
int nr_pages;
* decent number of pages, less frequently. To provide nicer use of the
* L1 cache.
*/
-static struct page *dio_get_page(struct dio *dio, struct dio_submit *sdio)
+static inline struct page *dio_get_page(struct dio *dio,
+					 struct dio_submit *sdio)
{
if (dio_pages_present(dio, sdio) == 0) {
int ret;
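
For reference, the pattern these two helpers implement is a plain refill-and-consume cache: dio_refill_pages() pins a batch of user pages in one call and dio_get_page() hands them out one at a time, refilling only when the cache runs dry. A minimal userspace sketch of that shape (hypothetical names and types, not the kernel API) could look like this:

#include <stdio.h>

#define BATCH 64        /* mirrors the "64 at a time" comment above */

struct page_cache {
        int pages[BATCH];       /* stand-in for pinned struct page pointers */
        int head;               /* next entry to hand out */
        int tail;               /* number of valid entries */
        int next;               /* next fake "page" to pin */
};

/* Refill the cache in one batch, like dio_refill_pages(). */
static int refill(struct page_cache *pc)
{
        int i;

        for (i = 0; i < BATCH; i++)
                pc->pages[i] = pc->next++;
        pc->head = 0;
        pc->tail = BATCH;
        return 0;
}

/* Hand out one entry, refilling only when the cache is empty. */
static int get_page(struct page_cache *pc)
{
        if (pc->head == pc->tail)
                refill(pc);
        return pc->pages[pc->head++];
}

int main(void)
{
        struct page_cache pc = { .next = 0 };
        int i;

        for (i = 0; i < 130; i++)       /* crosses two refill boundaries */
                printf("%d\n", get_page(&pc));
        return 0;
}

The point of the batching is the one the comment makes: one expensive pinning call amortized over many cheap, cache-friendly lookups.
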
}
EXPORT_SYMBOL_GPL(dio_end_io);
-static void
+static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
struct block_device *bdev,
sector_t first_sector, int nr_vecs)
*
* bios hold a dio reference between submit_bio and ->end_io.
*/
-static void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
+static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
struct bio *bio = sdio->bio;
unsigned long flags;
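
The comment above states the lifetime rule this relies on: each bio pins the dio with a reference taken before submit_bio() and dropped by the completion handler, so the dio stays alive while I/O is in flight. Roughly, that reference-counting discipline looks like the following userspace sketch (made-up dio_state and fake_* names, C11 atomics standing in for the kernel's refcounting):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dio_state {
        atomic_int refcount;    /* one ref per in-flight "bio" plus the submitter */
};

static void dio_get(struct dio_state *d)
{
        atomic_fetch_add(&d->refcount, 1);
}

static void dio_put(struct dio_state *d)
{
        /* Whoever drops the last reference frees the state. */
        if (atomic_fetch_sub(&d->refcount, 1) == 1) {
                printf("last reference dropped, freeing dio\n");
                free(d);
        }
}

/* Completion callback: models ->end_io dropping the per-bio reference. */
static void fake_end_io(struct dio_state *d)
{
        dio_put(d);
}

/* Submission: take a reference before the bio is handed to the device. */
static void fake_submit_bio(struct dio_state *d)
{
        dio_get(d);
}

int main(void)
{
        struct dio_state *d = malloc(sizeof(*d));

        if (!d)
                return 1;
        atomic_init(&d->refcount, 1);   /* submitter's own reference */
        fake_submit_bio(d);
        fake_submit_bio(d);
        fake_end_io(d);                 /* in the kernel, runs from the completion path */
        fake_end_io(d);
        dio_put(d);                     /* submitter drops its reference */
        return 0;
}
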
/*
* Release any resources in case of a failure
*/
-static void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
+static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
while (dio_pages_present(dio, sdio))
page_cache_release(dio_get_page(dio, sdio));
*
* This also helps to limit the peak amount of pinned userspace memory.
*/
-static int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
+static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
int ret = 0;
/*
* There is no bio. Make one now.
*/
-static int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
+static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
sector_t start_sector, struct buffer_head *map_bh)
{
sector_t sector;
*
* Return zero on success. Non-zero means the caller needs to start a new BIO.
*/
-static int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
+static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
{
int ret;
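
The contract spelled out above (zero on success, non-zero when the caller must start a new BIO) gives the caller a simple add, flush, retry loop. A small sketch of that calling convention, using a toy batch type rather than the bio API, might be:

#include <stdio.h>

#define BATCH_CAP 4     /* tiny capacity so the retry path is exercised */

struct batch {
        int items[BATCH_CAP];
        int count;
};

/* Returns 0 on success, nonzero when the batch is full and the caller must start a new one. */
static int batch_add(struct batch *b, int item)
{
        if (b->count == BATCH_CAP)
                return 1;
        b->items[b->count++] = item;
        return 0;
}

/* Models dio_bio_submit(): hand off the batch and reset it. */
static void batch_submit(struct batch *b)
{
        printf("submitting batch of %d\n", b->count);
        b->count = 0;
}

int main(void)
{
        struct batch b = { .count = 0 };
        int i;

        for (i = 0; i < 10; i++) {
                /* The retry-after-submit shape mirrors dio_send_cur_page(). */
                if (batch_add(&b, i)) {
                        batch_submit(&b);
                        batch_add(&b, i);       /* cannot fail on an empty batch */
                }
        }
        if (b.count)
                batch_submit(&b);               /* flush the partial final batch */
        return 0;
}
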
* The caller of this function is responsible for removing cur_page from the
* dio, and for dropping the refcount which came from that presence.
*/
-static int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
- struct buffer_head *map_bh)
+static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
+ struct buffer_head *map_bh)
{
int ret = 0;
* If that doesn't work out then we put the old page into the bio and add this
* page to the dio instead.
*/
-static int
+static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
unsigned offset, unsigned len, sector_t blocknr,
struct buffer_head *map_bh)
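
submit_page_section()'s strategy, as described above, is to coalesce the new chunk into the cached cur_page when it simply continues it, and otherwise to send the cached page and start caching the new one. A stripped-down sketch of that decision (made-up cur_chunk type; it ignores the block-contiguity and boundary checks the real code also performs) could be:

#include <stdio.h>
#include <stdbool.h>

/* Cached "current page" state, loosely modelled on the sdio->cur_page_* fields. */
struct cur_chunk {
        int page;               /* which page the cached range lives in */
        unsigned offset;        /* byte offset of the range inside that page */
        unsigned len;           /* length of the range */
        bool valid;
};

static void send_cur_chunk(struct cur_chunk *c)
{
        printf("sending page %d, offset %u, len %u\n", c->page, c->offset, c->len);
        c->valid = false;
}

/*
 * Either extend the cached range (same page, contiguous bytes), or send it
 * and start caching the new range instead.
 */
static void submit_section(struct cur_chunk *c, int page, unsigned offset, unsigned len)
{
        if (c->valid && c->page == page && c->offset + c->len == offset) {
                c->len += len;          /* coalesce with the cached range */
                return;
        }
        if (c->valid)
                send_cur_chunk(c);      /* flush what we had */
        c->page = page;
        c->offset = offset;
        c->len = len;
        c->valid = true;
}

int main(void)
{
        struct cur_chunk c = { .valid = false };

        submit_section(&c, 0, 0, 512);
        submit_section(&c, 0, 512, 512);        /* contiguous: coalesced, not sent */
        submit_section(&c, 1, 0, 1024);         /* new page: previous range is sent */
        if (c.valid)
                send_cur_chunk(&c);             /* final flush */
        return 0;
}
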
* `end' is zero if we're doing the start of the IO, 1 at the end of the
* IO.
*/
-static void dio_zero_block(struct dio *dio, struct dio_submit *sdio, int end,
-		struct buffer_head *map_bh)
+static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
+				   int end, struct buffer_head *map_bh)
{
unsigned dio_blocks_per_fs_block;
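
The `end' convention above drives a small piece of arithmetic: dio_zero_block() works out how many device-sized sub-blocks sit between the I/O boundary and the enclosing filesystem block edge, zeroing the front of the block when end is 0 and the tail when end is 1. A hedged sketch of just that arithmetic (the real function also checks buffer_new() and actually submits pages of zeroes) is:

#include <stdio.h>

/*
 * How many device-sized blocks need zeroing at the edge of a filesystem
 * block.  blkfactor is log2(fs block size / device block size); end is 0
 * for the front edge of the I/O and 1 for the back edge.
 */
static unsigned blocks_to_zero(unsigned long block_in_file,
                               unsigned blkfactor, int end)
{
        unsigned dio_blocks_per_fs_block = 1u << blkfactor;
        unsigned partial = block_in_file & (dio_blocks_per_fs_block - 1);

        if (!partial)           /* already on a filesystem block boundary */
                return 0;
        return end ? dio_blocks_per_fs_block - partial : partial;
}

int main(void)
{
        /* 4K fs blocks on a 512-byte device: blkfactor = 3, 8 sub-blocks per fs block. */
        printf("front edge at sub-block 5: zero %u blocks before it\n",
               blocks_to_zero(5, 3, 0));        /* 5 */
        printf("back edge at sub-block 5: zero %u blocks after it\n",
               blocks_to_zero(5, 3, 1));        /* 3 */
        return 0;
}
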
return ret;
}
-static ssize_t
+static inline ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
const struct iovec *iov, loff_t offset, unsigned long nr_segs,
unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
* expected that filesystem provide exclusion between new direct I/O
* and truncates. For DIO_LOCKING filesystems this is done by i_mutex,
* but other filesystems need to take care of this on their own.
+ *
+ * NOTE: if you pass "sdio" to anything by pointer, make sure that function
+ * is always inlined. Otherwise gcc is unable to split the structure into
+ * individual fields and will generate much worse code.
+ * This is important for the whole file.
*/
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
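
To make the NOTE concrete: once every helper taking &sdio is inlined into its single caller, the compiler can apply scalar replacement of aggregates and keep the hot cursor fields in registers rather than spilling and reloading them around each call. A toy example of the shape (not the kernel code; the actual effect depends on the compiler and optimization level):

#include <stdio.h>

/* A small cursor structure, standing in for struct dio_submit. */
struct cursor {
        unsigned long block;
        unsigned long total;
};

/*
 * Because this helper is inlined into its only caller, the compiler is
 * free to split "struct cursor" into its two fields and keep them in
 * registers across the loop below.  If this were a call to an out-of-line
 * function in another translation unit, the structure would have to live
 * in memory and be reloaded after each call.
 */
static inline void advance(struct cursor *c, unsigned long n)
{
        c->block += n;
        c->total += n;
}

static unsigned long walk(unsigned long nblocks)
{
        struct cursor c = { 0, 0 };
        unsigned long i;

        for (i = 0; i < nblocks; i++)
                advance(&c, 1);         /* inlined: the address of c never escapes */
        return c.total;
}

int main(void)
{
        printf("%lu\n", walk(1000));
        return 0;
}

An out-of-line advance() would force &c to escape and pin the structure to the stack for the whole loop, which is the cost the inlining in this patch avoids for struct dio_submit.
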