]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
mm, page_alloc: rename __GFP_WAIT to __GFP_RECLAIM
authorMel Gorman <mgorman@techsingularity.net>
Wed, 21 Oct 2015 22:03:07 +0000 (09:03 +1100)
committerStephen Rothwell <sfr@canb.auug.org.au>
Wed, 21 Oct 2015 22:03:07 +0000 (09:03 +1100)
__GFP_WAIT was used to signal that the caller was in atomic context and
could not sleep.  Now it is possible to distinguish between true atomic
context and callers that are not willing to sleep.  The latter should
clear __GFP_DIRECT_RECLAIM so kswapd will still wake.  As clearing
__GFP_WAIT behaves differently, there is a risk that people will clear the
wrong flags.  This patch renames __GFP_WAIT to __GFP_RECLAIM to clearly
indicate what it does -- setting it allows all reclaim activity, clearing
it prevents any reclaim.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
36 files changed:
block/blk-mq.c
block/scsi_ioctl.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nvme-core.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/gpu/drm/i915/i915_gem.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-devsets.c
drivers/ide/ide-disk.c
drivers/ide/ide-ioctls.c
drivers/ide/ide-park.c
drivers/ide/ide-pm.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/infiniband/hw/qib/qib_init.c
drivers/misc/vmw_balloon.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/staging/rdma/hfi1/init.c
drivers/staging/rdma/ipath/ipath_file_ops.c
fs/cachefiles/internal.h
fs/direct-io.c
fs/nilfs2/mdt.h
include/linux/gfp.h
kernel/power/swap.c
lib/percpu_ida.c
mm/failslab.c
mm/filemap.c
mm/huge_memory.c
mm/migrate.c
mm/page_alloc.c
security/integrity/ima/ima_crypto.c

index 93fa70631a6ed6dc583ecb84d37c1b28da17fcd5..3f3544edb941f169cae54042720c118c40a8563b 100644 (file)
@@ -1199,7 +1199,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q,
-                               __GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
+                               __GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
index dda653ce7b24cfb959f668bdb4a676900ed7637d..0774799942e06a8d890a5c88e40990cd53a15037 100644 (file)
@@ -444,7 +444,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
        }
 
-       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto error_free_buffer;
@@ -495,7 +495,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
                break;
        }
 
-       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
                err = DRIVER_ERROR << 24;
                goto error;
        }
@@ -536,7 +536,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        struct request *rq;
        int err;
 
-       rq = blk_get_request(q, WRITE, __GFP_WAIT);
+       rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
index e5e0f19ceda09eb8dfc2e129b504b7c0899eede8..3dc53a16ed3aaf14dfd30311c16e0dc14f6623a0 100644 (file)
@@ -1007,7 +1007,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        bm_set_page_unchanged(b->bm_pages[page_nr]);
 
        if (ctx->flags & BM_AIO_COPY_PAGES) {
-               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
                copy_highpage(page, b->bm_pages[page_nr]);
                bm_store_page_idx(page, page_nr);
        } else
index f504232c1ee779079353e7ffd4b2d6831f87e61b..a28a562f7b7f245355db7d536e0558c78cc7e374 100644 (file)
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
        struct request *rq;
 
-       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
        return blk_mq_rq_to_pdu(rq);
 }
 
index 6f04771f1019798cc2feabf73eff2ddbadc84b81..e917cf304ad0c53b470399a80fb95cca8b1ed2ed 100644 (file)
@@ -1030,11 +1030,11 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
        req->special = (void *)0;
 
        if (buffer && bufflen) {
-               ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_RECLAIM);
                if (ret)
                        goto out;
        } else if (ubuffer && bufflen) {
-               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_RECLAIM);
                if (ret)
                        goto out;
                bio = req->bio;
index b9242d78283db3a2bb03524a9bbbfd6923b7dccf..562b5a4ca7b712f6b2b4440375904d8be1ac5200 100644 (file)
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
        struct request *rq;
        int err = 0;
 
-       rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
index 7be2375db7f2630e8412bd2c21819ae35a6c8da0..5959c2981cc7d18efbbec56615db1d29b2698064 100644 (file)
@@ -704,14 +704,14 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
        int ret = 0;
 
        rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-                            WRITE : READ, __GFP_WAIT);
+                            WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out;
        }
index d58cb9e034fe896acf71d6a399cf00e599ff357b..7e505d4be7c0718ab475f62741b5d0d6d3cc9692 100644 (file)
@@ -2216,7 +2216,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        mapping = file_inode(obj->base.filp)->i_mapping;
        gfp = mapping_gfp_mask(mapping);
        gfp |= __GFP_NORETRY | __GFP_NOWARN;
-       gfp &= ~(__GFP_IO | __GFP_WAIT);
+       gfp &= ~(__GFP_IO | __GFP_RECLAIM);
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
index 1362ad80a76c071e9e2abb2b36472e433bc5dae0..05352f490d6088d736016e588c8a79313df30e21 100644 (file)
@@ -92,7 +92,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
        struct request *rq;
        int error;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->special = (char *)pc;
 
index 64a6b827b3dd12210e23008a81af06e9d9d7cb64..ef907fd5ba98a036d98d24478af49ec0a4c5831b 100644 (file)
@@ -441,7 +441,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
                struct request *rq;
                int error;
 
-               rq = blk_get_request(drive->queue, write, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
 
                memcpy(rq->cmd, cmd, BLK_MAX_CDB);
                rq->cmd_type = REQ_TYPE_ATA_PC;
index 066e3903651842fa31c82bc278c8f7434bf6c950..474173eb31bb345c86bf31fcb195a44432ee14ad 100644 (file)
@@ -303,7 +303,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
        struct request *rq;
        int ret;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_flags = REQ_QUIET;
        ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
index b05a74d78ef560deefd8f022d27f145766e094b4..0dd43b4fcec6353633d12338be557b8e1e13ef14 100644 (file)
@@ -165,7 +165,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
        if (!(setting->flags & DS_SYNC))
                return setting->set(drive, arg);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 5;
        rq->cmd[0] = REQ_DEVSET_EXEC;
index 56b9708894a5e294302e42066711274b6941616c..37a8a907febeb2556277fa78d03f474a5d865d84 100644 (file)
@@ -477,7 +477,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
        if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        drive->mult_req = arg;
index aa2e9b77b20d39a67d3da80fd23338fcb631faf6..d05db2469209bb1dfe1f9757d078b0298e55d180 100644 (file)
@@ -125,7 +125,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
        if (NULL == (void *) arg) {
                struct request *rq;
 
-               rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
                rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
                err = blk_execute_rq(drive->queue, NULL, rq, 0);
                blk_put_request(rq);
@@ -221,7 +221,7 @@ static int generic_drive_reset(ide_drive_t *drive)
        struct request *rq;
        int ret = 0;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 1;
        rq->cmd[0] = REQ_DRIVE_RESET;
index c808685204883db93213e80ee9516a298e1f58b1..2d7dca56dd244387a633e2eec30b177dfbd25a70 100644 (file)
@@ -31,7 +31,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
        }
        spin_unlock_irq(&hwif->lock);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd[0] = REQ_PARK_HEADS;
        rq->cmd_len = 1;
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
index 081e43458d50f745671f9c5c4476d760e33b7fc0..e34af488693a62d26fbd7d282232d393a4807f55 100644 (file)
@@ -18,7 +18,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
        rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -88,7 +88,7 @@ int generic_ide_resume(struct device *dev)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
        rq->cmd_flags |= REQ_PREEMPT;
        rq->special = &rqpm;
index f5d51d1d09ee480becca86a1cda687ab3919cdb9..12fa04997dcc33a09129389435fb96f00bfe8e87 100644 (file)
@@ -852,7 +852,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
        BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
        BUG_ON(size < 0 || size % tape->blk_size);
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
@@ -860,7 +860,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 
        if (size) {
                ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out_put;
        }
index 0979e126fff1e69ee3b3f8df19e5a443cbae5194..a716693417a308c49186bcfb4e692aa86ef1d99d 100644 (file)
@@ -430,7 +430,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
        int error;
        int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-       rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        /*
@@ -441,7 +441,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
         */
        if (nsect) {
                error = blk_rq_map_kern(drive->queue, rq, buf,
-                                       nsect * SECTOR_SIZE, __GFP_WAIT);
+                                       nsect * SECTOR_SIZE, __GFP_RECLAIM);
                if (error)
                        goto put_req;
        }
index 7e00470adc30223c183f0e287f7a7d0d9beff944..4ff340fe904f5c3bc9484b1f0cf2c53abd2bd4dd 100644 (file)
@@ -1680,7 +1680,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = rcd->rcvegrcnt;
        egroff = rcd->rcvegr_tid_base;
index ffb56340d0c7c28bf2daaf0f512ae835fa659f38..1b49e53463a268a35a5e75fe2bbc81c5c8c76119 100644 (file)
@@ -85,7 +85,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
  */
 #define VMW_PAGE_ALLOC_NOSLEEP         (__GFP_HIGHMEM|__GFP_NOWARN)
index 66a96cd98b975dcdbd5429cf02f069b52771fa84..984ddcb4786d6074c94a2b0387c25362a4f4c2c4 100644 (file)
@@ -1970,7 +1970,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
        struct request *req;
 
        /*
-        * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+        * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
         * request becomes available
         */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
index 126a48c6431e5a5d9798aed3472916b06ef476c8..dd8ad2a44510cae1b97260b7d64fc65e7770db53 100644 (file)
@@ -222,13 +222,13 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;
 
-       req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+       req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
        if (IS_ERR(req))
                return ret;
        blk_rq_set_block_pc(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
-                                       buffer, bufflen, __GFP_WAIT))
+                                       buffer, bufflen, __GFP_RECLAIM))
                goto out;
 
        req->cmd_len = COMMAND_SIZE(cmd[0]);
index a877eda8c13c72ac5fa67c55bf1bd1eddf50139c..29fff7f2a45a1f59334c3783d0f0c7ecc54001ec 100644 (file)
@@ -1564,7 +1564,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        /*
         * The minimum size of the eager buffers is a groups of MTU-sized
index 450d1596500512c7f41174dec8d078bb15edb79a..c11f6c58ce534df07c91f01c57a2ddbb8f1ba1f2 100644 (file)
@@ -905,7 +905,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
index aecd0859eacbf28d9b3782527c178f8e05000e7c..9c4b737a54df64bb15df446c5c359e594ce5b3a3 100644 (file)
@@ -30,7 +30,7 @@ extern unsigned cachefiles_debug;
 #define CACHEFILES_DEBUG_KLEAVE        2
 #define CACHEFILES_DEBUG_KDEBUG        4
 
-#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /*
  * node records
index 11256291642e5df1f66fd1880109cf7c260796cc..dbb94a2d6c504a08d0349d891ccbc2d8acc2f6a1 100644 (file)
@@ -360,7 +360,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 
        /*
         * bio_alloc() is guaranteed to return a bio when called with
-        * __GFP_WAIT and we request a valid number of vectors.
+        * __GFP_RECLAIM and we request a valid number of vectors.
         */
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
index fe529a87a208d4db026efbe98de71608a1622a6c..03246cac33384e2cc9a0d49bfe8beaba74118873 100644 (file)
@@ -72,7 +72,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
 }
 
 /* Default GFP flags using highmem */
-#define NILFS_MDT_GFP      (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
+#define NILFS_MDT_GFP      (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 
 int nilfs_mdt_get_block(struct inode *, unsigned long, int,
                        void (*init_block)(struct inode *,
index b56e811b6f7c77f91a3535da405c919574c2f4f7..60b2db94d49d43d872382ce6807655e2db0e1bd2 100644 (file)
@@ -107,7 +107,7 @@ struct vm_area_struct;
  * can be cleared when the reclaiming of pages would cause unnecessary
  * disruption.
  */
-#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
 #define __GFP_DIRECT_RECLAIM   ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
 #define __GFP_KSWAPD_RECLAIM   ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
 
@@ -126,12 +126,12 @@ struct vm_area_struct;
  */
 #define GFP_ATOMIC     (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_NOWAIT     (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO       (__GFP_WAIT)
-#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY  (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+#define GFP_NOIO       (__GFP_RECLAIM)
+#define GFP_NOFS       (__GFP_RECLAIM | __GFP_IO)
+#define GFP_KERNEL     (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY  (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
                         __GFP_RECLAIMABLE)
-#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER       (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE)
 #define GFP_IOFS       (__GFP_IO | __GFP_FS | __GFP_KSWAPD_RECLAIM)
@@ -144,12 +144,12 @@ struct vm_area_struct;
 #define GFP_MOVABLE_SHIFT 3
 
 /* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
index b2066fb5b10f7639af4a391fe9786429cff90ac4..53d95673d88a4a9a4c7d7e020f96363591d8eb76 100644 (file)
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
        struct bio *bio;
        int error = 0;
 
-       bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+       bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = hib_resume_bdev;
 
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
                return -ENOSPC;
 
        if (hb) {
-               src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+               src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
-                       src = (void *)__get_free_page(__GFP_WAIT |
+                       src = (void *)__get_free_page(__GFP_RECLAIM |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-       page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+       page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
                last = tmp;
 
                tmp->map = (struct swap_map_page *)
-                          __get_free_page(__GFP_WAIT | __GFP_HIGH);
+                          __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
@@ -1242,8 +1242,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-                                                 __GFP_WAIT | __GFP_HIGH :
-                                                 __GFP_WAIT | __GFP_NOWARN |
+                                                 __GFP_RECLAIM | __GFP_HIGH :
+                                                 __GFP_RECLAIM | __GFP_NOWARN |
                                                  __GFP_NORETRY);
 
                if (!page[i]) {
index f75715131f2094cc3b76d2d1f14ba31e1ccf3744..6d40944960de77bff090f41a0f94f0f85e8522a4 100644 (file)
@@ -135,7 +135,7 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
  * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
index fefaabaab76da1f8758436f6e2ddf788251a5b1a..69f083146a379c4582dbb3548de512dd5d2e07f6 100644 (file)
@@ -3,11 +3,11 @@
 
 static struct {
        struct fault_attr attr;
-       u32 ignore_gfp_wait;
+       u32 ignore_gfp_reclaim;
        int cache_filter;
 } failslab = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = 1,
+       .ignore_gfp_reclaim = 1,
        .cache_filter = 0,
 };
 
@@ -16,7 +16,7 @@ bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
        if (gfpflags & __GFP_NOFAIL)
                return false;
 
-        if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+        if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
                return false;
 
        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
@@ -42,7 +42,7 @@ static int __init failslab_debugfs_init(void)
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &failslab.ignore_gfp_wait))
+                               &failslab.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("cache-filter", mode, dir,
                                &failslab.cache_filter))
index 884766da11655afb1bf54d176ed32da732aa3f3d..0acc15f164c93e74042a26b475416874d0191366 100644 (file)
@@ -2713,7 +2713,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * page is known to the local caching routines.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
+ * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
index 916dffc00a6b7376c77c42fcf978138859622ff9..7e7feb640abae8ae141aa2c9481144f546d15cb9 100644 (file)
@@ -844,7 +844,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
 }
 
 /* Caller must hold page table lock. */
index efdf9a31dd6cd47a9fd335c7d4278048aa71c716..25f0fa141593245937c2c8654b5dddfc38cee3ba 100644 (file)
@@ -1750,7 +1750,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                goto out_dropref;
 
        new_page = alloc_pages_node(node,
-               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
                HPAGE_PMD_ORDER);
        if (!new_page)
                goto out_fail;
index 7c7f7a3098442f36c58ebeb4b309b0e3f518ebce..2db29dac02f5bac77020d90f6743fb27cdb5aefc 100644 (file)
@@ -2160,11 +2160,11 @@ static struct {
        struct fault_attr attr;
 
        u32 ignore_gfp_highmem;
-       u32 ignore_gfp_wait;
+       u32 ignore_gfp_reclaim;
        u32 min_order;
 } fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = 1,
+       .ignore_gfp_reclaim = 1,
        .ignore_gfp_highmem = 1,
        .min_order = 1,
 };
@@ -2202,7 +2202,7 @@ static int __init fail_page_alloc_debugfs(void)
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &fail_page_alloc.ignore_gfp_wait))
+                               &fail_page_alloc.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                &fail_page_alloc.ignore_gfp_highmem))
index e24121afb2f2773eacced7410dbdd5953a49c206..6eb62936c6723c02ca0b565ebe955aab2eb74c17 100644 (file)
@@ -126,7 +126,7 @@ static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
 {
        void *ptr;
        int order = ima_maxorder;
-       gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY;
+       gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
 
        if (order)
                order = min(get_order(max_size), order);