git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'block/for-next'
author Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:36:26 +0000 (14:36 +0200)
committer Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:36:26 +0000 (14:36 +0200)
block/blk-settings.c
drivers/block/loop.c
fs/fscache/object.c

diff --combined block/blk-settings.c
index 026c1517505f2aaab4780a15735850104abbf7eb,53309333c2f015fff6b6a7ae8a9a01700105b3e8..05e826793e4e36b2e6c8de29802674767e3bd4d8
@@@ -144,6 -144,7 +144,7 @@@ void blk_set_stacking_limits(struct que
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
+       lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
  }
@@@ -195,17 -196,17 +196,17 @@@ EXPORT_SYMBOL(blk_queue_make_request)
  /**
   * blk_queue_bounce_limit - set bounce buffer limit for queue
   * @q: the request queue for the device
 - * @dma_mask: the maximum address the device can handle
 + * @max_addr: the maximum address the device can handle
   *
   * Description:
   *    Different hardware can have different requirements as to what pages
   *    it can do I/O directly to. A low level driver can call
   *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 - *    buffers for doing I/O to pages residing above @dma_mask.
 + *    buffers for doing I/O to pages residing above @max_addr.
   **/
 -void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 +void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
  {
 -      unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
 +      unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;
  
        q->bounce_gfp = GFP_NOIO;
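The renamed @max_addr parameter above is the same value callers were already passing in; a low-level driver invokes this helper once while configuring its request queue. A minimal hypothetical sketch of such a caller (the driver function name and the 32-bit mask are assumptions for illustration, not part of this commit):

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical driver queue setup: bounce any page above the low 4GB. */
	static void example_driver_setup_queue(struct request_queue *q)
	{
		/* Pages residing above this address get bounce buffers allocated below it. */
		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
	}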
diff --combined drivers/block/loop.c
index e5647690a751ef1f1ea6bcd09ee24dc758a1e292,c8dac730524408f63e78cb9acdae8294aaf8dafa..abe147aee19d2b7b3c243a46b38cb3b0c55831a7
@@@ -75,7 -75,6 +75,7 @@@
  #include <linux/sysfs.h>
  #include <linux/miscdevice.h>
  #include <linux/falloc.h>
 +#include <linux/aio.h>
  #include "loop.h"
  
  #include <asm/uaccess.h>
@@@ -219,48 -218,6 +219,48 @@@ lo_do_transfer(struct loop_device *lo, 
        return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
  }
  
 +#ifdef CONFIG_AIO
 +static void lo_rw_aio_complete(u64 data, long res)
 +{
 +      struct bio *bio = (struct bio *)(uintptr_t)data;
 +
 +      if (res > 0)
 +              res = 0;
 +      else if (res < 0)
 +              res = -EIO;
 +
 +      bio_endio(bio, res);
 +}
 +
 +static int lo_rw_aio(struct loop_device *lo, struct bio *bio)
 +{
 +      struct file *file = lo->lo_backing_file;
 +      struct kiocb *iocb;
 +      unsigned int op;
 +      struct iov_iter iter;
 +      struct bio_vec *bvec;
 +      size_t nr_segs;
 +      loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 +
 +      iocb = aio_kernel_alloc(GFP_NOIO);
 +      if (!iocb)
 +              return -ENOMEM;
 +
 +      if (bio_rw(bio) & WRITE)
 +              op = IOCB_CMD_WRITE_ITER;
 +      else
 +              op = IOCB_CMD_READ_ITER;
 +
 +      bvec = bio_iovec_idx(bio, bio->bi_idx);
 +      nr_segs = bio_segments(bio);
 +      iov_iter_init_bvec(&iter, bvec, nr_segs, bvec_length(bvec, nr_segs), 0);
 +      aio_kernel_init_rw(iocb, file, iov_iter_count(&iter), pos);
 +      aio_kernel_init_callback(iocb, lo_rw_aio_complete, (u64)(uintptr_t)bio);
 +
 +      return aio_kernel_submit(iocb, op, &iter);
 +}
 +#endif /* CONFIG_AIO */
 +
  /**
   * __do_lo_send_write - helper for writing data to a loop device
   *
@@@ -461,33 -418,50 +461,33 @@@ static int do_bio_filebacked(struct loo
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
  
        if (bio_rw(bio) == WRITE) {
 -              struct file *file = lo->lo_backing_file;
 +              ret = lo_send(lo, bio, pos);
 +      } else
 +              ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
  
 -              if (bio->bi_rw & REQ_FLUSH) {
 -                      ret = vfs_fsync(file, 0);
 -                      if (unlikely(ret && ret != -EINVAL)) {
 -                              ret = -EIO;
 -                              goto out;
 -                      }
 -              }
 +      return ret;
 +}
  
 -              /*
 -               * We use punch hole to reclaim the free space used by the
 -               * image a.k.a. discard. However we do not support discard if
 -               * encryption is enabled, because it may give an attacker
 -               * useful information.
 -               */
 -              if (bio->bi_rw & REQ_DISCARD) {
 -                      struct file *file = lo->lo_backing_file;
 -                      int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
 -
 -                      if ((!file->f_op->fallocate) ||
 -                          lo->lo_encrypt_key_size) {
 -                              ret = -EOPNOTSUPP;
 -                              goto out;
 -                      }
 -                      ret = file->f_op->fallocate(file, mode, pos,
 -                                                  bio->bi_size);
 -                      if (unlikely(ret && ret != -EINVAL &&
 -                                   ret != -EOPNOTSUPP))
 -                              ret = -EIO;
 -                      goto out;
 -              }
 +static int lo_discard(struct loop_device *lo, struct bio *bio)
 +{
 +      struct file *file = lo->lo_backing_file;
 +      int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
 +      loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 +      int ret;
  
 -              ret = lo_send(lo, bio, pos);
 +      /*
 +       * We use punch hole to reclaim the free space used by the
 +       * image a.k.a. discard. However we do not support discard if
 +       * encryption is enabled, because it may give an attacker
 +       * useful information.
 +       */
  
 -              if ((bio->bi_rw & REQ_FUA) && !ret) {
 -                      ret = vfs_fsync(file, 0);
 -                      if (unlikely(ret && ret != -EINVAL))
 -                              ret = -EIO;
 -              }
 -      } else
 -              ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 +      if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size)
 +              return -EOPNOTSUPP;
  
 -out:
 +      ret = file->f_op->fallocate(file, mode, pos, bio->bi_size);
 +      if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
 +              ret = -EIO;
        return ret;
  }
  
@@@ -551,35 -525,7 +551,35 @@@ static inline void loop_handle_bio(stru
                do_loop_switch(lo, bio->bi_private);
                bio_put(bio);
        } else {
 -              int ret = do_bio_filebacked(lo, bio);
 +              int ret;
 +
 +              if (bio_rw(bio) == WRITE) {
 +                      if (bio->bi_rw & REQ_FLUSH) {
 +                              ret = vfs_fsync(lo->lo_backing_file, 1);
 +                              if (unlikely(ret && ret != -EINVAL))
 +                                      goto out;
 +                      }
 +                      if (bio->bi_rw & REQ_DISCARD) {
 +                              ret = lo_discard(lo, bio);
 +                              goto out;
 +                      }
 +              }
 +#ifdef CONFIG_AIO
 +              if (lo->lo_flags & LO_FLAGS_USE_AIO &&
 +                  lo->transfer == transfer_none) {
 +                      ret = lo_rw_aio(lo, bio);
 +                      if (ret == 0)
 +                              return;
 +              } else
 +#endif
 +                      ret = do_bio_filebacked(lo, bio);
 +
 +              if ((bio_rw(bio) == WRITE) && bio->bi_rw & REQ_FUA && !ret) {
 +                      ret = vfs_fsync(lo->lo_backing_file, 0);
 +                      if (unlikely(ret && ret != -EINVAL))
 +                              ret = -EIO;
 +              }
 +out:
                bio_endio(bio, ret);
        }
  }
@@@ -601,12 -547,6 +601,12 @@@ static int loop_thread(void *data
        struct loop_device *lo = data;
        struct bio *bio;
  
 +      /*
 +       * In cases where the underlying filesystem calls balance_dirty_pages()
 +       * we want less throttling to avoid lock ups trying to write dirty
 +       * pages through the loop device
 +       */
 +      current->flags |= PF_LESS_THROTTLE;
        set_user_nice(current, -20);
  
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
@@@ -929,14 -869,6 +929,14 @@@ static int loop_set_fd(struct loop_devi
            !file->f_op->write)
                lo_flags |= LO_FLAGS_READ_ONLY;
  
 +#ifdef CONFIG_AIO
 +      if (file->f_op->write_iter && file->f_op->read_iter &&
 +          mapping->a_ops->direct_IO) {
 +              file->f_flags |= O_DIRECT;
 +              lo_flags |= LO_FLAGS_USE_AIO;
 +      }
 +#endif
 +
        lo_blocksize = S_ISBLK(inode->i_mode) ?
                inode->i_bdev->bd_block_size : PAGE_SIZE;
  
  
        bio_list_init(&lo->lo_bio_list);
  
-       /*
-        * set queue make_request_fn, and add limits based on lower level
-        * device
-        */
-       blk_queue_make_request(lo->lo_queue, loop_make_request);
-       lo->lo_queue->queuedata = lo;
        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
                blk_queue_flush(lo->lo_queue, REQ_FLUSH);
  
  
        set_blocksize(bdev, lo_blocksize);
  
 +#ifdef CONFIG_AIO
 +      /*
 +       * We must not send too-small direct-io requests, so we inherit
 +       * the logical block size from the underlying device
 +       */
 +      if ((lo_flags & LO_FLAGS_USE_AIO) && inode->i_sb->s_bdev)
 +              blk_queue_logical_block_size(lo->lo_queue,
 +                              bdev_logical_block_size(inode->i_sb->s_bdev));
 +#endif
 +
        lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
                                                lo->lo_number);
        if (IS_ERR(lo->lo_thread)) {
@@@ -1696,6 -1611,8 +1689,8 @@@ static int loop_add(struct loop_device 
        if (!lo)
                goto out;
  
+       lo->lo_state = Lo_unbound;
        /* allocate id, if @id >= 0, we're requesting that specific id */
        if (i >= 0) {
                err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
        err = -ENOMEM;
        lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
        if (!lo->lo_queue)
-               goto out_free_dev;
+               goto out_free_idr;
+       /*
+        * set queue make_request_fn
+        */
+       blk_queue_make_request(lo->lo_queue, loop_make_request);
+       lo->lo_queue->queuedata = lo;
  
        disk = lo->lo_disk = alloc_disk(1 << part_shift);
        if (!disk)
  
  out_free_queue:
        blk_cleanup_queue(lo->lo_queue);
+ out_free_idr:
+       idr_remove(&loop_index_idr, i);
  out_free_dev:
        kfree(lo);
  out:
@@@ -1819,7 -1744,7 +1822,7 @@@ static struct kobject *loop_probe(dev_
        if (err < 0)
                err = loop_add(&lo, MINOR(dev) >> part_shift);
        if (err < 0)
-               kobj = ERR_PTR(err);
+               kobj = NULL;
        else
                kobj = get_disk(lo->lo_disk);
        mutex_unlock(&loop_index_mutex);
diff --combined fs/fscache/object.c
index dcb8216177747d033731bfd6b4a42ff903af44c5,3b0712213993de45941e141b033be0587b51c80c..53d35c5042404738c213233220500feaf7686ae8
@@@ -495,7 -495,6 +495,7 @@@ void fscache_object_lookup_negative(str
                 * returning ENODATA.
                 */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
 +              clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
  
                _debug("wake up lookup %p", &cookie->flags);
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
@@@ -528,7 -527,6 +528,7 @@@ void fscache_obtained_object(struct fsc
  
                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
 +              clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
  
                /* Allow write requests to begin stacking up and read requests
                 * to begin shovelling data.
@@@ -681,8 -679,7 +681,8 @@@ static const struct fscache_state *fsca
         */
        spin_lock(&cookie->lock);
        hlist_del_init(&object->cookie_link);
 -      if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
 +      if (hlist_empty(&cookie->backing_objects) &&
 +          test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                awaken = true;
        spin_unlock(&cookie->lock);
  
@@@ -799,7 -796,7 +799,7 @@@ void fscache_enqueue_object(struct fsca
   */
  bool fscache_object_sleep_till_congested(signed long *timeoutp)
  {
-       wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
+       wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
        DEFINE_WAIT(wait);
  
        if (fscache_object_congested())
@@@ -930,7 -927,7 +930,7 @@@ static const struct fscache_state *_fsc
         */
        if (!fscache_use_cookie(object)) {
                ASSERT(object->cookie->stores.rnode == NULL);
 -              set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
 +              set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
        }