arch: Mass conversion of smp_mb__*()
author Peter Zijlstra <peterz@infradead.org>
Mon, 17 Mar 2014 17:06:10 +0000 (18:06 +0100)
committer Ingo Molnar <mingo@kernel.org>
Fri, 18 Apr 2014 12:20:48 +0000 (14:20 +0200)
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
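
The rename collapses the six operation-specific barriers into two generic
ones. As a sketch of the mapping only (these defines are written here for
illustration and are not part of this commit), every hunk below performs
the substitution:

	/* Illustrative aliases: the six specialized barriers become
	 * the two generic ones; the semantics are unchanged. */
	#define smp_mb__before_clear_bit()	smp_mb__before_atomic()
	#define smp_mb__before_atomic_inc()	smp_mb__before_atomic()
	#define smp_mb__before_atomic_dec()	smp_mb__before_atomic()
	#define smp_mb__after_clear_bit()	smp_mb__after_atomic()
	#define smp_mb__after_atomic_inc()	smp_mb__after_atomic()
	#define smp_mb__after_atomic_dec()	smp_mb__after_atomic()

Only the naming is unified; each call site keeps the same full-barrier
ordering around its bitop or atomic_t operation.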
106 files changed:
block/blk-iopoll.c
crypto/chainiv.c
drivers/base/power/domain.c
drivers/block/mtip32xx/mtip32xx.c
drivers/cpuidle/coupled.c
drivers/firewire/ohci.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_irq.c
drivers/md/bcache/bcache.h
drivers/md/bcache/closure.h
drivers/md/dm-bufio.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/md/raid5.c
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/wireless/ti/wlcore/main.c
drivers/pci/xen-pcifront.c
drivers/scsi/isci/remote_device.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_device.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/target_core_ua.c
drivers/tty/n_tty.c
drivers/tty/serial/mxs-auart.c
drivers/usb/gadget/tcm_usb_gadget.c
drivers/usb/serial/usb_wwan.c
drivers/vhost/scsi.c
drivers/w1/w1_family.c
drivers/xen/xen-pciback/pciback_ops.c
fs/btrfs/btrfs_inode.h
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/buffer.c
fs/ext4/resize.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/lock_dlm.c
fs/gfs2/recovery.c
fs/gfs2/sys.c
fs/jbd2/commit.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/write.c
fs/ubifs/lpt_commit.c
fs/ubifs/tnc_commit.c
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/lock.h
include/linux/buffer_head.h
include/linux/genhd.h
include/linux/interrupt.h
include/linux/netdevice.h
include/linux/sched.h
include/linux/sunrpc/sched.h
include/linux/sunrpc/xprt.h
include/linux/tracehook.h
include/net/ip_vs.h
kernel/debug/debug_core.c
kernel/futex.c
kernel/kmod.c
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/sched/cpupri.c
kernel/sched/wait.c
mm/backing-dev.c
mm/filemap.c
net/atm/pppoatm.c
net/bluetooth/hci_event.c
net/core/dev.c
net/core/link_watch.c
net/ipv4/inetpeer.c
net/ipv4/tcp_output.c
net/netfilter/nf_conntrack_core.c
net/rds/ib_recv.c
net/rds/iw_recv.c
net/rds/send.c
net/rds/tcp_send.c
net/sunrpc/auth.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
sound/pci/bt87x.c

diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index c11d24e379e2a4b89caf1936d4433773caef1bdc..f8c6a11b13f07b70966357256238abe88dee7cb5 100644
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(blk_iopoll_sched);
 void __blk_iopoll_complete(struct blk_iopoll *iop)
 {
        list_del(&iop->list);
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
 }
 EXPORT_SYMBOL(__blk_iopoll_complete);
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(blk_iopoll_disable);
 void blk_iopoll_enable(struct blk_iopoll *iop)
 {
        BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
 }
 EXPORT_SYMBOL(blk_iopoll_enable);
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 834d8dd3d4fc7abf1c72da93cb24f81632989239..9c294c8f9a07888a7d3f5a9210da334c26218f77 100644
@@ -126,7 +126,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
        int err = ctx->err;
 
        if (!ctx->queue.qlen) {
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
 
                if (!ctx->queue.qlen ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ae098a261fcdb29262e778036ad5e9712841f221..eee55c1e5fde49310779c9fc4e15aa8ce5415114 100644
@@ -105,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 {
        atomic_inc(&genpd->sd_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 59c5abe32f06f055e939e5290e56055d66bd4c05..4fd8d6c1c3d2f5b27f95d60c12199b24f2fe967d 100644
@@ -224,9 +224,9 @@ static int get_slot(struct mtip_port *port)
  */
 static inline void release_slot(struct mtip_port *port, int tag)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(tag, port->allocated);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 /*
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index cb6654bfad77f7ec221900a381dfa56abd453363..73fe2f8d7f961dad2c227dca5c32f0080e2b8604 100644
@@ -159,7 +159,7 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
 {
        int n = dev->coupled->online_count;
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(a);
 
        while (atomic_read(a) < n)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db66321956068701cde997e0bc25e167e781eb3..995dd42a262731c73c37df2aede83d6976b4ffbb 100644
@@ -3498,7 +3498,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
                }
 
                clear_bit_unlock(0, &ctx->flushing_completions);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
        }
 
        tasklet_enable(&ctx->context.tasklet);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c2676b5908d9f6edbcf4dd8d9fb126cfb93cd5ab..ec5c3f4cdd011de07be08bc1b74a955fb4b3fcda 100644
@@ -156,7 +156,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         */
        if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
                atomic_inc(&dev->vblank[crtc].count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
        }
 
        /* Invalidate all timestamps while vblank irq's are off. */
@@ -864,9 +864,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
                vblanktimestamp(dev, crtc, tslot) = t_vblank;
        }
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_add(diff, &dev->vblank[crtc].count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 /**
@@ -1330,9 +1330,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
                /* Increment cooked vblank count. This also atomically commits
                 * the timestamp computed above.
                 */
-               smp_mb__before_atomic_inc();
+               smp_mb__before_atomic();
                atomic_inc(&dev->vblank[crtc].count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
        } else {
                DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
                          crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a959cce7f31b8c9cf1ba0b36c18dce8..5409bfafff63c486084ce538cd35da01359e3352 100644
@@ -2147,7 +2147,7 @@ static void i915_error_work_func(struct work_struct *work)
                         * updates before
                         * the counter increment.
                         */
-                       smp_mb__before_atomic_inc();
+                       smp_mb__before_atomic();
                        atomic_inc(&dev_priv->gpu_error.reset_counter);
 
                        kobject_uevent_env(&dev->primary->kdev->kobj,
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 82c9c5d35251f643b325b7e2075f03d66532833c..d2ebcf3230942ab7872025c9fbf177ae515aded4 100644
@@ -828,7 +828,7 @@ static inline bool cached_dev_get(struct cached_dev *dc)
                return false;
 
        /* Paired with the mb in cached_dev_attach */
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        return true;
 }
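
The "Paired with the mb in cached_dev_attach" comment above is the classic
two-sided barrier handshake. A rough user-space C11 analogue (a sketch for
illustration only, not the bcache code; the names are invented):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int  refcount;
	static atomic_bool closing;

	/* Reader: take a reference, fence, then check for shutdown. */
	static bool dev_get(void)
	{
		atomic_fetch_add_explicit(&refcount, 1, memory_order_relaxed);
		/* Plays the role of smp_mb__after_atomic(). */
		atomic_thread_fence(memory_order_seq_cst);
		if (!atomic_load_explicit(&closing, memory_order_relaxed))
			return true;
		atomic_fetch_sub_explicit(&refcount, 1, memory_order_relaxed);
		return false;
	}

	/* Writer: announce shutdown, fence, then wait for readers to drain. */
	static void dev_close(void)
	{
		atomic_store_explicit(&closing, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst); /* pairs with the above */
		while (atomic_load_explicit(&refcount, memory_order_relaxed))
			; /* a real driver would sleep and be woken here */
	}

With both fences in place, either the reader's increment is visible to the
writer's drain loop, or the writer's flag is visible to the reader's check;
neither side can miss the other.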
 
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 7ef7461912be252d00ce23b79504664e96ffd84f..a08e3eeac3c5fbf389ede37b839f40f705cd4d47 100644
@@ -243,7 +243,7 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
        cl->fn = fn;
        cl->wq = wq;
        /* between atomic_dec() in closure_put() */
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
 }
 
 static inline void closure_queue(struct closure *cl)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 66c5d130c8c24c4f3101ce78296460da4487f38b..4e84095833dbce97972318a59841a75db0f2b241 100644
@@ -607,9 +607,9 @@ static void write_endio(struct bio *bio, int error)
 
        BUG_ON(!test_bit(B_WRITING, &b->state));
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(B_WRITING, &b->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        wake_up_bit(&b->state, B_WRITING);
 }
@@ -997,9 +997,9 @@ static void read_endio(struct bio *bio, int error)
 
        BUG_ON(!test_bit(B_READING, &b->state));
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(B_READING, &b->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        wake_up_bit(&b->state, B_READING);
 }
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ebddef5237e4b28e6254e486b3267dbccca9864e..8e0caed0bf74650d85279b9331d1b1fec132058f 100644
@@ -642,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
        struct dm_snapshot *s = pe->snap;
 
        mempool_free(pe, s->pending_pool);
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&s->pending_exceptions_count);
 }
 
@@ -783,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 static void merge_shutdown(struct dm_snapshot *s)
 {
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 455e6491649889d1970cd8ef3425e93f2ea6a93c..2db768e4553f9221c23abf5f81643b9f909d4839 100644
@@ -2447,7 +2447,7 @@ static void dm_wq_work(struct work_struct *work)
 static void dm_queue_flush(struct mapped_device *md)
 {
        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        queue_work(md->wq, &md->work);
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ad1b9bea446ebdbaea0083cb976ed4207c618cc1..2afef4ec9312d222bb51e76fbce26f254e564cf0 100644
@@ -4400,7 +4400,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                         * STRIPE_ON_UNPLUG_LIST clear but the stripe
                         * is still in our list
                         */
-                       smp_mb__before_clear_bit();
+                       smp_mb__before_atomic();
                        clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
                        /*
                         * STRIPE_ON_RELEASE_LIST could be set here. In that
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index de02db802acea6bc47caacab100d6481eb639f8f..e35580618936cd62e8a3bbd3ec54e50ebd7cc211 100644
@@ -399,7 +399,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
 
        /* clear 'streaming' status bit */
        clear_bit(ADAP_STREAMING, &adap->state_bits);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&adap->state_bits, ADAP_STREAMING);
 skip_feed_stop:
 
@@ -550,7 +550,7 @@ static int dvb_usb_fe_init(struct dvb_frontend *fe)
 err:
        if (!adap->suspend_resume_active) {
                clear_bit(ADAP_INIT, &adap->state_bits);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&adap->state_bits, ADAP_INIT);
        }
 
@@ -591,7 +591,7 @@ err:
        if (!adap->suspend_resume_active) {
                adap->active_fe = -1;
                clear_bit(ADAP_SLEEP, &adap->state_bits);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&adap->state_bits, ADAP_SLEEP);
        }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9261d5313b5be2bd361612640535fbf9c2810438..dd57c7c5a3da8e011a83e257832a05326147b64e 100644
@@ -2781,7 +2781,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        case LOAD_OPEN:
                netif_tx_start_all_queues(bp->dev);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                break;
 
        case LOAD_DIAG:
@@ -4939,9 +4939,9 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
                            u32 verbose)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        set_bit(flag, &bp->sp_rtnl_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
           flag);
        schedule_delayed_work(&bp->sp_rtnl_task, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee92d8f2439ac40f3b3ba887ec0000..16391db2e8c9c35d8579e5032e60d9a5435ee477 100644
@@ -1858,10 +1858,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                return;
 #endif
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(&bp->cq_spq_left);
        /* push the change in bp->spq_left and towards the memory */
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 
        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
@@ -1876,11 +1876,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                 * sp_state is cleared, and this order prevents
                 * races
                 */
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
                wmb();
                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                /* schedule the sp task as mcp ack is required */
                bnx2x_schedule_sp_task(bp);
@@ -5272,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
                __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
 
                /* mark latest Q bit */
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                /* send Q update ramrod for FCoE Q */
                rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -5500,7 +5500,7 @@ next_spqe:
                spqe_cnt++;
        } /* for */
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_add(spqe_cnt, &bp->eq_spq_left);
 
        bp->eq_cons = sw_cons;
@@ -13869,9 +13869,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
        case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
                int count = ctl->data.credit.credit_count;
 
-               smp_mb__before_atomic_inc();
+               smp_mb__before_atomic();
                atomic_add(count, &bp->cq_spq_left);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                break;
        }
        case DRV_CTL_ULP_REGISTER_CMD: {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 31297266b743e27fa527da4636bc893b1c64cc56..d725317c42773822ee5f945960df8a66e0f3df66 100644
@@ -258,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
 
 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(o->state, o->pstate);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        set_bit(o->state, o->pstate);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 /**
@@ -2131,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
 
        /* The operation is completed */
        clear_bit(p->state, p->pstate);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return 0;
 }
@@ -3576,16 +3576,16 @@ error_exit1:
 
 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(o->sched_state, o->raw.pstate);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        set_bit(o->sched_state, o->raw.pstate);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
@@ -4200,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
                if (rc) {
                        o->next_state = BNX2X_Q_STATE_MAX;
                        clear_bit(pending_bit, pending);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
                        return rc;
                }
 
@@ -4288,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
        wmb();
 
        clear_bit(cmd, &o->pending);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return 0;
 }
@@ -5279,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
        wmb();
 
        clear_bit(cmd, &o->pending);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return 0;
 }
@@ -5926,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
                if (rc) {
                        o->next_state = BNX2X_F_STATE_MAX;
                        clear_bit(cmd, pending);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
                        return rc;
                }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70126720dbf0b2914dcbb1a3391a2b..f82ac5ac233633cb7a557f1dba336d2ea6a5c46b 100644
@@ -1626,9 +1626,9 @@ static
 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
                                 struct bnx2x_virtf *vf)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
@@ -2960,9 +2960,9 @@ void bnx2x_iov_task(struct work_struct *work)
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        set_bit(flag, &bp->iov_task_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
 }
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9ce405839e6f5893174a79dc91c5b8..4dd48d2fa804324c40c3989c6f29fdc939418f46 100644
@@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk)
 static int cnic_close_prep(struct cnic_sock *csk)
 {
        clear_bit(SK_F_CONNECT_START, &csk->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
                while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
@@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk)
 static int cnic_abort_prep(struct cnic_sock *csk)
 {
        clear_bit(SK_F_CONNECT_START, &csk->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
                msleep(1);
@@ -3646,7 +3646,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
 
        csk_hold(csk);
        clear_bit(SK_F_INUSE, &csk->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        while (atomic_read(&csk->ref_count) != 1)
                msleep(1);
        cnic_cm_cleanup(csk);
@@ -4026,7 +4026,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
                         L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
                        set_bit(SK_F_HW_ERR, &csk->flags);
 
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
                cnic_cm_upcall(cp, csk, opcode);
                break;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 675550fe8ee90dfe7f2c704787d7b900107f06d4..3a77f9ead004998f07ec8b4b1d01588f9d2355bd 100644
@@ -249,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
                bna_ib_ack(tcb->i_dbell, sent);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
        return sent;
@@ -1126,7 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work)
 
                bnad_txq_cleanup(bnad, tcb);
 
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
        }
 
@@ -2992,7 +2992,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                        sent = bnad_txcmpl_process(bnad, tcb);
                        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
                                bna_ib_ack(tcb->i_dbell, sent);
-                       smp_mb__before_clear_bit();
+                       smp_mb__before_atomic();
                        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
                } else {
                        netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0fe7ff750d77e1618a9fd49a59bbf9577ed9190d..05613a85ce617aeb48e33ed333cb5282075189f2 100644
@@ -281,7 +281,7 @@ static int cxgb_close(struct net_device *dev)
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 8b069f96e920e4dec0e2c28cb197747ea56438c5..3dfcf600fcc68bee74b41a51942c283f7bf191aa 100644
@@ -1379,7 +1379,7 @@ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
                struct sge_qset *qs = txq_to_qset(q, qid);
 
                set_bit(qid, &qs->txq_stopped);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                if (should_restart_tx(q) &&
                    test_and_clear_bit(qid, &qs->txq_stopped))
@@ -1492,7 +1492,7 @@ static void restart_ctrlq(unsigned long data)
 
        if (!skb_queue_empty(&q->sendq)) {
                set_bit(TXQ_CTRL, &qs->txq_stopped);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                if (should_restart_tx(q) &&
                    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
@@ -1697,7 +1697,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
                if (unlikely(q->size - q->in_use < ndesc)) {
                        set_bit(TXQ_OFLD, &qs->txq_stopped);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
 
                        if (should_restart_tx(q) &&
                            test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ca95cf2954eb33f62719130a8b0432fbb324c2b6..e249528c8e60bbce6bdc53c9e0c65a2f8c95d836 100644
@@ -2031,7 +2031,7 @@ static void sge_rx_timer_cb(unsigned long data)
                        struct sge_fl *fl = s->egr_map[id];
 
                        clear_bit(id, s->starving_fl);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
 
                        if (fl_starving(fl)) {
                                rxq = container_of(fl, struct sge_eth_rxq, fl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9cfa4b4bb089d398a1b687a71d32f0856d15fa47..9d88c1d50b49ba23c2539de80b3309e08943d060 100644
@@ -1951,7 +1951,7 @@ static void sge_rx_timer_cb(unsigned long data)
                        struct sge_fl *fl = s->egr_map[id];
 
                        clear_bit(id, s->starving_fl);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
 
                        /*
                         * Since we are accessing fl without a lock there's a
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf0998d31e3179bd9c712af487855d5a9..d82f092cae90598826da157cc37b1fc26ff9fb51 100644
@@ -1797,9 +1797,9 @@ void stop_gfar(struct net_device *dev)
 
        netif_tx_stop_all_queues(dev);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        set_bit(GFAR_DOWN, &priv->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        disable_napi(priv);
 
@@ -2042,9 +2042,9 @@ int startup_gfar(struct net_device *ndev)
 
        gfar_init_tx_rx_base(priv);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(GFAR_DOWN, &priv->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672e78b0ed0ccbe2cdd2f4428d8f472..1e526c072a44ec73dcbb7c78a7e9d4404cb692f0 100644
@@ -4671,7 +4671,7 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
        BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
 
        /* flush memory to make sure state is correct before next watchog */
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f48e2fe4eaccfe070273bd4078c98..2fecc2626de5abe16296687307f9bee7c203b126 100644
@@ -376,7 +376,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
        /* flush memory to make sure state is correct before next watchdog */
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
 
@@ -4671,7 +4671,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
        if (hw->mac.ops.enable_tx_laser)
                hw->mac.ops.enable_tx_laser(hw);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
@@ -5567,7 +5567,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
                e_dev_err("Cannot enable PCI device from suspend\n");
                return err;
        }
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBE_DISABLED, &adapter->state);
        pci_set_master(pdev);
 
@@ -8541,7 +8541,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
                e_err(probe, "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(__IXGBE_DISABLED, &adapter->state);
                adapter->hw.hw_addr = adapter->io_addr;
                pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d0799e8e31e4ea08e5dc89ec64b58c76bd5840a3..de2793b06305528ee1f62da85be27a069df10306 100644
@@ -1668,7 +1668,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
        spin_unlock_bh(&adapter->mbx_lock);
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
 
@@ -3354,7 +3354,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
                dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);
 
@@ -3712,7 +3712,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);
 
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ed88d39134839e34510d83252949dbbb2a964e83..e71eae35336874cacca99c48d80713704142b6f1 100644
@@ -543,7 +543,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
                 * wl1271_ps_elp_wakeup cannot be called concurrently.
                 */
                clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                ret = wlcore_fw_status(wl, wl->fw_status);
                if (ret < 0)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 179b8edc22624d0d8571013b2e464eaee3a12a03..53df39a22c8acb20f6dda5dff8aa4050516dbfb2 100644
@@ -662,9 +662,9 @@ static void pcifront_do_aer(struct work_struct *data)
        notify_remote_via_evtchn(pdev->evtchn);
 
        /*in case of we lost an aer request in four lines time_window*/
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(_PDEVB_op_active, &pdev->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        schedule_pcifront_aer_op(pdev);
 
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 96a26f454673cf8b9cad6abd38ee9a28104c6a95..cc51f38b116d580b9c0c24042714f178de2247ed 100644
@@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref)
        clear_bit(IDEV_STOP_PENDING, &idev->flags);
        clear_bit(IDEV_IO_READY, &idev->flags);
        clear_bit(IDEV_GONE, &idev->flags);
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(IDEV_ALLOCATED, &idev->flags);
        wake_up(&ihost->eventq);
 }
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c886ad1c39fb357ddb6e72a384474bdc89b01278..73ab75ddaf42e9b3fc8e378cf3ae3669d1e040e8 100644
@@ -951,7 +951,7 @@ static int tcm_loop_port_link(
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
        atomic_inc(&tl_tpg->tl_tpg_port_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        /*
         * Add Linux/SCSI struct scsi_device by HCTL
         */
@@ -986,7 +986,7 @@ static void tcm_loop_port_unlink(
        scsi_device_put(sd);
 
        atomic_dec(&tl_tpg->tl_tpg_port_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 
        pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fcbe6125b73ee51289ffe93f9d7951d3ff4485e2..0b79b852f4b2d9751d7a0f7224b63c13de39bf6a 100644
@@ -393,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                                        continue;
 
                                atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                               smp_mb__after_atomic_inc();
+                               smp_mb__after_atomic();
 
                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -404,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -990,7 +990,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                 * TARGET PORT GROUPS command
                 */
                atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
                spin_lock_bh(&port->sep_alua_lock);
@@ -1020,7 +1020,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        /*
@@ -1054,7 +1054,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
        if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1116,7 +1116,7 @@ static int core_alua_do_transition_tg_pt(
         */
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1159,7 +1159,7 @@ int core_alua_do_port_transition(
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
@@ -1176,7 +1176,7 @@ int core_alua_do_port_transition(
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec(&lu_gp->lu_gp_ref_cnt);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                return rc;
        }
        /*
@@ -1190,7 +1190,7 @@ int core_alua_do_port_transition(
 
                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&lu_gp->lu_gp_lock);
 
                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1219,7 +1219,7 @@ int core_alua_do_port_transition(
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
@@ -1230,7 +1230,7 @@ int core_alua_do_port_transition(
 
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                       smp_mb__after_atomic_dec();
+                       smp_mb__after_atomic();
                        if (rc)
                                break;
                }
@@ -1238,7 +1238,7 @@ int core_alua_do_port_transition(
 
                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&lu_gp->lu_gp_lock);
 
@@ -1252,7 +1252,7 @@ int core_alua_do_port_transition(
        }
 
        atomic_dec(&lu_gp->lu_gp_ref_cnt);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
        return rc;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 65001e1336702966108081443d5a44f39988d5af..72618776ede414cae74ba4853555a04fe233b5ef 100644
@@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
                        continue;
 
                atomic_inc(&deve->pr_ref_count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock_irq(&nacl->device_list_lock);
 
                return deve;
@@ -1392,7 +1392,7 @@ int core_dev_add_initiator_node_lun_acl(
        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        spin_unlock(&lun->lun_acl_lock);
 
        pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1426,7 +1426,7 @@ int core_dev_del_initiator_node_lun_acl(
        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
        spin_unlock(&lun->lun_acl_lock);
 
        core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 9e0232cca92e03356c768e3dada2a27f53427452..7e6b857c6b3f18563a8325cb471780c39ade158f 100644
@@ -323,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err)
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
        }
 
        bio_put(bio);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3013287a2aaa192fdacdfde7eb87bcd48eb0aff4..df357862286e10758492b8ad88dfa65e79c1582f 100644
@@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
        spin_lock(&dev->se_port_lock);
        list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
                atomic_inc(&port->sep_tg_pt_ref_cnt);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&dev->se_port_lock);
 
                spin_lock_bh(&port->sep_alua_lock);
@@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                continue;
 
                        atomic_inc(&deve_tmp->pr_ref_count);
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        spin_unlock_bh(&port->sep_alua_lock);
                        /*
                         * Grab a configfs group dependency that is released
@@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                pr_err("core_scsi3_lunacl_depend"
                                                "_item() failed\n");
                                atomic_dec(&port->sep_tg_pt_ref_cnt);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                atomic_dec(&deve_tmp->pr_ref_count);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                goto out;
                        }
                        /*
@@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                                sa_res_key, all_tg_pt, aptpl);
                        if (!pr_reg_atp) {
                                atomic_dec(&port->sep_tg_pt_ref_cnt);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                atomic_dec(&deve_tmp->pr_ref_count);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                core_scsi3_lunacl_undepend_item(deve_tmp);
                                goto out;
                        }
@@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 
                spin_lock(&dev->se_port_lock);
                atomic_dec(&port->sep_tg_pt_ref_cnt);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&dev->se_port_lock);
 
@@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
                                        continue;
                        }
                        atomic_inc(&pr_reg->pr_res_holders);
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        spin_unlock(&pr_tmpl->registration_lock);
                        return pr_reg;
                }
@@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
                        continue;
 
                atomic_inc(&pr_reg->pr_res_holders);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&pr_tmpl->registration_lock);
                return pr_reg;
        }
@@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
 {
        atomic_dec(&pr_reg->pr_res_holders);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static int core_scsi3_check_implicit_release(
@@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
                        &tpg->tpg_group.cg_item);
 
        atomic_dec(&tpg->tpg_pr_ref_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 
        if (nacl->dynamic_node_acl) {
                atomic_dec(&nacl->acl_pr_ref_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                return;
        }
 
@@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
                        &nacl->acl_group.cg_item);
 
        atomic_dec(&nacl->acl_pr_ref_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
         */
        if (!lun_acl) {
                atomic_dec(&se_deve->pr_ref_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                return;
        }
        nacl = lun_acl->se_lun_nacl;
@@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
                        &lun_acl->se_lun_group.cg_item);
 
        atomic_dec(&se_deve->pr_ref_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static sense_reason_t
@@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port(
                                continue;
 
                        atomic_inc(&tmp_tpg->tpg_pr_ref_count);
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        spin_unlock(&dev->se_port_lock);
 
                        if (core_scsi3_tpg_depend_item(tmp_tpg)) {
                                pr_err(" core_scsi3_tpg_depend_item()"
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                                goto out_unmap;
                        }
@@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port(
                                                tmp_tpg, i_str);
                        if (dest_node_acl) {
                                atomic_inc(&dest_node_acl->acl_pr_ref_count);
-                               smp_mb__after_atomic_inc();
+                               smp_mb__after_atomic();
                        }
                        spin_unlock_irq(&tmp_tpg->acl_node_lock);
 
@@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port(
                                pr_err("configfs_depend_item() failed"
                                        " for dest_node_acl->acl_group\n");
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
-                               smp_mb__after_atomic_dec();
+                               smp_mb__after_atomic();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
                                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                                goto out_unmap;
@@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port(
                        pr_err("core_scsi3_lunacl_depend_item()"
                                        " failed\n");
                        atomic_dec(&dest_se_deve->pr_ref_count);
-                       smp_mb__after_atomic_dec();
+                       smp_mb__after_atomic();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3168,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
                        continue;
 
                atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&dev->se_port_lock);
 
                if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
                        pr_err("core_scsi3_tpg_depend_item() failed"
                                " for dest_se_tpg\n");
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
-                       smp_mb__after_atomic_dec();
+                       smp_mb__after_atomic();
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto out_put_pr_reg;
                }
@@ -3273,7 +3273,7 @@ after_iport_check:
                                initiator_str);
        if (dest_node_acl) {
                atomic_inc(&dest_node_acl->acl_pr_ref_count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
        }
        spin_unlock_irq(&dest_se_tpg->acl_node_lock);
 
@@ -3289,7 +3289,7 @@ after_iport_check:
                pr_err("core_scsi3_nodeacl_depend_item() for"
                        " dest_node_acl\n");
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                dest_node_acl = NULL;
                ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
@@ -3314,7 +3314,7 @@ after_iport_check:
        if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                pr_err("core_scsi3_lunacl_depend_item() failed\n");
                atomic_dec(&dest_se_deve->pr_ref_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                dest_se_deve = NULL;
                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
@@ -3880,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                add_desc_len = 0;
 
                atomic_inc(&pr_reg->pr_res_holders);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                spin_unlock(&pr_tmpl->registration_lock);
                /*
                 * Determine expected length of $FABRIC_MOD specific
@@ -3894,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                                " out of buffer: %d\n", cmd->data_length);
                        spin_lock(&pr_tmpl->registration_lock);
                        atomic_dec(&pr_reg->pr_res_holders);
-                       smp_mb__after_atomic_dec();
+                       smp_mb__after_atomic();
                        break;
                }
                /*
@@ -3956,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 
                spin_lock(&pr_tmpl->registration_lock);
                atomic_dec(&pr_reg->pr_res_holders);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                /*
                 * Set the ADDITIONAL DESCRIPTOR LENGTH
                 */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d4b98690a73680244676b6e608ede6c85ff724cb..4badca1cd625cea95bb052905e53e3bb5eae9e56 100644
@@ -736,7 +736,7 @@ void target_qf_do_work(struct work_struct *work)
        list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
 
                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1148,7 +1148,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
         * Dormant to Active status.
         */
        cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
                        dev->transport->name);
@@ -1705,7 +1705,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
                return false;
        case MSG_ORDERED_TAG:
                atomic_inc(&dev->dev_ordered_sync);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
 
                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
                         " se_ordered_id: %u\n",
@@ -1723,7 +1723,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
                 * For SIMPLE and UNTAGGED Task Attribute commands
                 */
                atomic_inc(&dev->simple_cmds);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                break;
        }
 
@@ -1828,7 +1828,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 
        if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                atomic_dec(&dev->simple_cmds);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1840,7 +1840,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
                atomic_dec(&dev->dev_ordered_sync);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1899,7 +1899,7 @@ static void transport_handle_queue_full(
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc(&dev->dev_qf_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
 
        schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2875,7 +2875,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        cmd->transport_state |= CMD_T_ABORTED;
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        return;
                }
        }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 505519b10cb75bdb1e7e4c204e89f7c570500821..101858e245b36c3ad7da937ec3637e7ec32650d6 100644
@@ -162,7 +162,7 @@ int core_scsi3_ua_allocate(
                spin_unlock_irq(&nacl->device_list_lock);
 
                atomic_inc(&deve->ua_count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                return 0;
        }
        list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -175,7 +175,7 @@ int core_scsi3_ua_allocate(
                asc, ascq);
 
        atomic_inc(&deve->ua_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        return 0;
 }
 
@@ -190,7 +190,7 @@ void core_scsi3_ua_release_all(
                kmem_cache_free(se_ua_cache, ua);
 
                atomic_dec(&deve->ua_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&deve->ua_lock);
 }
@@ -251,7 +251,7 @@ void core_scsi3_ua_for_check_condition(
                kmem_cache_free(se_ua_cache, ua);
 
                atomic_dec(&deve->ua_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&deve->ua_lock);
        spin_unlock_irq(&nacl->device_list_lock);
@@ -310,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense(
                kmem_cache_free(se_ua_cache, ua);
 
                atomic_dec(&deve->ua_count);
-               smp_mb__after_atomic_dec();
+               smp_mb__after_atomic();
        }
        spin_unlock(&deve->ua_lock);
        spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41fe8a047d373cf84b14a9a2f5d8f41e07fd3b5f..746ae80b972f4f40bad5099f6f74770ec70b995e 100644
@@ -2041,7 +2041,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 
        if (found)
                clear_bit(eol, ldata->read_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        ldata->read_tail += c;
 
        if (found) {
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index aa97fd845b4d8123102c6026ed9a79e53d61e45f..4b5b3c2fe328710ef821e9e0a29813fdf21ae639 100644
@@ -200,7 +200,7 @@ static void dma_tx_callback(void *param)
 
        /* clear the bit used to serialize the DMA tx. */
        clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        /* wake up the possible processes. */
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -275,7 +275,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
                        mxs_auart_dma_tx(s, i);
                } else {
                        clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
                }
                return;
        }
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index f058c0368d61a4b4663bca3bd58a3558e6c5ec04..819875c7e3941719e2d3c6573a60e60e4a0be6d7 100644
@@ -1851,7 +1851,7 @@ static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
        struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
 
        atomic_inc(&tpg->tpg_port_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        return 0;
 }
 
@@ -1861,7 +1861,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,
        struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
 
        atomic_dec(&tpg->tpg_port_count);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static int usbg_check_stop_free(struct se_cmd *se_cmd)
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 640fe0173236807c3178d53265da8588a01cb730..f1ec1680e8221c2e6b49d2f497d99e58f955ba1b 100644
@@ -325,7 +325,7 @@ static void usb_wwan_outdat_callback(struct urb *urb)
 
        for (i = 0; i < N_OUT_URB; ++i) {
                if (portdata->out_urbs[i] == urb) {
-                       smp_mb__before_clear_bit();
+                       smp_mb__before_atomic();
                        clear_bit(i, &portdata->out_busy);
                        break;
                }
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index cf50ce93975bcddb240c3356b4326aac0fc8c3cb..aeb513108448d7c87e3dfa6cf43accec47b13cd4 100644
@@ -1255,7 +1255,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
                        match = true;
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
index 3bff6b37b4727f5ad692d31e6fcbdf62ab07b34a..3651ec801f453dc0824af7c56a5b5cf7242e81d0 100644 (file)
@@ -139,9 +139,9 @@ void w1_family_get(struct w1_family *f)
 
 void __w1_family_get(struct w1_family *f)
 {
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(&f->refcnt);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 EXPORT_SYMBOL(w1_unregister_family);
index 607e41460c0d7ef0dcea4e8702eab0ec3e1cdf8a..c4a0666de6f5e08a17edf4fcbcb001478d0e303b 100644 (file)
@@ -348,9 +348,9 @@ void xen_pcibk_do_op(struct work_struct *data)
        notify_remote_via_irq(pdev->evtchn_irq);
 
        /* Mark that we're done. */
-       smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
+       smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
        clear_bit(_PDEVF_op_active, &pdev->flags);
-       smp_mb__after_clear_bit(); /* /before/ final check for work */
+       smp_mb__after_atomic(); /* /before/ final check for work */
 
        /* Check to see if the driver domain tried to start another request in
         * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
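
The pciback comments spell out the classic "clear the busy flag, then look again" idiom: the first barrier orders the finished op before the flag clear, the second orders the clear before the final re-check, so a request submitted in the window is either seen by the worker or finds the active flag already clear. A compact C11 rendering of both sides (flag names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool op_active;    /* _PDEVF_op_active analogue */
    static atomic_bool op_pending;   /* _XEN_PCIF_active analogue */

    static bool worker_finish(void)  /* true: more work raced in */
    {
        atomic_thread_fence(memory_order_seq_cst);  /* before_atomic */
        atomic_store_explicit(&op_active, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* after_atomic  */
        return atomic_load_explicit(&op_pending, memory_order_relaxed);
    }

    static bool submit(void)         /* true: worker must be kicked */
    {
        atomic_store_explicit(&op_pending, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return !atomic_load_explicit(&op_active, memory_order_relaxed);
    }
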
index c9a24444ec9a317483c4486fb132b6893f57a19d..2256e9cceec54da0a66d450b3b2aaf078499f5ac 100644 (file)
@@ -279,7 +279,7 @@ static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
 
 static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
                  &BTRFS_I(inode)->runtime_flags);
 }
index 3955e475ceece295ea9db55def2178a319a817cc..f29a54e454d4f8ce034e8786c9dcce6e0fab7112 100644 (file)
@@ -3458,7 +3458,7 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
 static void end_extent_buffer_writeback(struct extent_buffer *eb)
 {
        clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
 }
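
end_extent_buffer_writeback() is one of many clear-then-wake_up_bit() sites in the patch: the fence makes the cleared flag visible before the waker peeks at the waitqueue, pairing with a waiter that queues itself before re-testing the bit. A pthread sketch of the shape; the userspace mutex already orders everything, a luxury the kernel's lockless waitqueue_active() check does not have:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <pthread.h>

    static atomic_bool under_writeback;
    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;

    static void end_writeback(void)
    {
        atomic_store_explicit(&under_writeback, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        pthread_mutex_lock(&lk);                   /* wake_up_bit() analogue */
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lk);
    }

    static void wait_writeback(void)               /* wait_on_bit() analogue */
    {
        pthread_mutex_lock(&lk);
        while (atomic_load_explicit(&under_writeback, memory_order_relaxed))
            pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);
    }
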
 
index 5f805bc944fad00127cf812a922e54ccd9f7b396..5a3b8371772e33d5d03c8714772f8f71b3248cf1 100644 (file)
@@ -7126,7 +7126,7 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
                 * before atomic variable goto zero, we must make sure
                 * dip->errors is perceived to be set.
                 */
-               smp_mb__before_atomic_dec();
+               smp_mb__before_atomic();
        }
 
        /* if there are more bios still pending for this dio, just exit */
@@ -7306,7 +7306,7 @@ out_err:
         * before atomic variable goto zero, we must
         * make sure dip->errors is perceived to be set.
         */
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        if (atomic_dec_and_test(&dip->pending_bios))
                bio_io_error(dip->orig_bio);
 
@@ -7449,7 +7449,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                return 0;
 
        atomic_inc(&inode->i_dio_count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 
        /*
         * The generic stuff only does filemap_write_and_wait_range, which
index e79ff6b90cb71bb131426b97838c369ae0e6f48c..f45040a4bb76b40de3a677f2e1a8dadc43dcbe79 100644 (file)
@@ -642,7 +642,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                return -EINVAL;
 
        atomic_inc(&root->will_be_snapshoted);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        btrfs_wait_nocow_write(root);
 
        ret = btrfs_start_delalloc_inodes(root, 0);
index 9ddb9fc7d923fa31299a8aba228f61973d3b429f..6a8110c03a4734c7efa0ad2e90ff22fe4b365485 100644 (file)
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(__lock_buffer);
 void unlock_buffer(struct buffer_head *bh)
 {
        clear_bit_unlock(BH_Lock, &bh->b_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
 }
 EXPORT_SYMBOL(unlock_buffer);
index f3b84cd9de566ff7b1ffb5d16e75483e9b9d06f2..08b3c116915b8289faccc489c8876382a565351d 100644 (file)
@@ -42,7 +42,7 @@ int ext4_resize_begin(struct super_block *sb)
 void ext4_resize_end(struct super_block *sb)
 {
        clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
index aec7f73832f080c3fd5288cdc3722162b72257bd..c355f7320e448bfe30a8c325b2448d707f8c19be 100644 (file)
@@ -277,7 +277,7 @@ static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holde
 static void gfs2_holder_wake(struct gfs2_holder *gh)
 {
        clear_bit(HIF_WAIT, &gh->gh_iflags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 }
 
@@ -411,7 +411,7 @@ static void gfs2_demote_wake(struct gfs2_glock *gl)
 {
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 }
 
@@ -620,7 +620,7 @@ out:
 
 out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        gl->gl_lockref.count++;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gl->gl_lockref.count--;
@@ -628,7 +628,7 @@ out_sched:
 
 out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        return;
 }
 
index 54b66809e818e4fbffd46c186f7409948e35e3a2..74d9a3dbf16fe868826cff848f9fbb1c6e008f17 100644 (file)
@@ -221,7 +221,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);
 }
 
index c1eb555dc588e178cbbeee5e6771563a29f4a81a..91f274de1246cabce0f052ede54dea53cf404cfc 100644 (file)
@@ -1134,7 +1134,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
                queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
 
        clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
        spin_unlock(&ls->ls_recover_spin);
 }
@@ -1271,7 +1271,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 
        ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
        return 0;
 
index 7ad4094d68c0243981ea244b4ac75914e66f0ab7..fe7a56fb608476215aaba6188885ee70e9c44341 100644 (file)
@@ -587,7 +587,7 @@ fail:
        gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
 done:
        clear_bit(JDF_RECOVERY, &jd->jd_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
 }
 
index de25d5577e5dd48869354f8e1e1a2e0ec514e35d..529d9a9eb89765093969837fba6cf44fb374f38e 100644 (file)
@@ -333,7 +333,7 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
                set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        else if (val == 0) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                gfs2_glock_thaw(sdp);
        } else {
                ret = -EINVAL;
@@ -482,7 +482,7 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
                rv = jid = -EINVAL;
        sdp->sd_lockstruct.ls_jid = jid;
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
 out:
        spin_unlock(&sdp->sd_jindex_spin);
index 5f26139a165a7081dd2f835222f26dcbc5d3bf4a..6fac743498565746bfb6dd5f29e0abacb48a0865 100644 (file)
@@ -43,7 +43,7 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
                clear_buffer_uptodate(bh);
        if (orig_bh) {
                clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&orig_bh->b_state, BH_Shadow);
        }
        unlock_buffer(bh);
@@ -239,7 +239,7 @@ static int journal_submit_data_buffers(journal_t *journal,
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
@@ -277,7 +277,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
                }
                spin_lock(&journal->j_list_lock);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
 
index d9f3d067cd15635ffd0bb569ef76a156a2d84630..4a3d4ef76127bc716028d3d9df25791d91ff76ce 100644 (file)
@@ -2032,9 +2032,9 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
 {
        put_rpccred(entry->cred);
        kfree(entry);
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_long_dec(&nfs_access_nr_entries);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
 }
 
 static void nfs_access_free_list(struct list_head *head)
@@ -2082,9 +2082,9 @@ nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
                else {
 remove_lru_entry:
                        list_del_init(&nfsi->access_cache_inode_lru);
-                       smp_mb__before_clear_bit();
+                       smp_mb__before_atomic();
                        clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
                }
                spin_unlock(&inode->i_lock);
        }
@@ -2232,9 +2232,9 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
        nfs_access_add_rbtree(inode, cache);
 
        /* Update accounting */
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_long_inc(&nfs_access_nr_entries);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 
        /* Add inode to global LRU list */
        if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
index 0c438973f3c8687836fc16bae3fb9aa89a767727..e6f7398d2b3cac22b30c9515985b07a1caff9bc1 100644 (file)
@@ -1085,7 +1085,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
        trace_nfs_invalidate_mapping_exit(inode, ret);
 
        clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(bitlock, NFS_INO_INVALIDATING);
 out:
        return ret;
index efac602edb37027a944601621baa8361c09f9fc9..b9c61efe96600476db344d976b05781750c3daa2 100644 (file)
@@ -789,9 +789,9 @@ static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
 
 static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
 }
 
index 2349518eef2c28c9c74692335252b4a3877f4d22..c0583b9bef716a316c7f2554786345a958c6427c 100644 (file)
@@ -1140,9 +1140,9 @@ static int nfs4_run_state_manager(void *);
 
 static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
        rpc_wake_up(&clp->cl_rpcwaitq);
 }
index 2ffebf2081ceb5c779cf7f53644fa453bff45ca2..03ed984ab4d8016e3e062a7fc95c2b3268e6287c 100644 (file)
@@ -95,7 +95,7 @@ nfs_iocounter_dec(struct nfs_io_counter *c)
 {
        if (atomic_dec_and_test(&c->io_count)) {
                clear_bit(NFS_IO_INPROGRESS, &c->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
        }
 }
@@ -193,9 +193,9 @@ void nfs_unlock_request(struct nfs_page *req)
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(PG_BUSY, &req->wb_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&req->wb_flags, PG_BUSY);
 }
 
index cb53d450ae321e4464e9dee7d9a4aa6ef2074b9e..fd9536e494bc202184178449b5316c780a1b8e8f 100644 (file)
@@ -1810,7 +1810,7 @@ static void pnfs_clear_layoutcommitting(struct inode *inode)
        unsigned long *bitlock = &NFS_I(inode)->flags;
 
        clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
 }
 
index 023793909778ef975b3441d19b8d8632845bc2c4..c3058a076596f954f4a22379e71a69600b67c834 100644 (file)
@@ -275,7 +275,7 @@ pnfs_get_lseg(struct pnfs_layout_segment *lseg)
 {
        if (lseg) {
                atomic_inc(&lseg->pls_refcount);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
        }
        return lseg;
 }
index 9a3b6a4cd6b9581a037f2508e5b6d4aca565cd93..ffb9459f180bc73f3d7d499c14f5ba4c0b219968 100644 (file)
@@ -405,7 +405,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
        nfs_pageio_complete(&pgio);
 
        clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(bitlock, NFS_INO_FLUSHING);
 
        if (err < 0)
@@ -1458,7 +1458,7 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
 {
        clear_bit(NFS_INO_COMMIT, &nfsi->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
 }
 
index 4b826abb1528f57035b2ebb47ccfc4ee458e0ff8..45d4e96a6bac0fe36f67b268ada5fea066627eee 100644 (file)
@@ -460,9 +460,9 @@ static int write_cnodes(struct ubifs_info *c)
                 * important.
                 */
                clear_bit(DIRTY_CNODE, &cnode->flags);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(COW_CNODE, &cnode->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                offs += len;
                dbg_chk_lpt_sz(c, 1, len);
                cnode = cnode->cnext;
index 52a6559275c43eb350d818d10797879ec0c1b9f4..3600994f84112e99b4b6d3a4a21209cd6f9738ff 100644 (file)
@@ -895,9 +895,9 @@ static int write_index(struct ubifs_info *c)
                 * the reason for the second barrier.
                 */
                clear_bit(DIRTY_ZNODE, &znode->flags);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(COW_ZNODE, &znode->flags);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
                /*
                 * We have marked the znode as clean but have not updated the
index 9ae6c34dc191e73adc4b6402618a05dae773f243..49673510b484157cefd9d7e94393bd6226ab443e 100644 (file)
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile unsigned long *addr)
index 308a9e22c802192f2fd65d99eff34fe3216426e1..c30266e9480650aa51b3f05b3d59ceedd5062e04 100644 (file)
@@ -20,7 +20,7 @@
  */
 #define clear_bit_unlock(nr, addr)     \
 do {                                   \
-       smp_mb__before_clear_bit();     \
+       smp_mb__before_atomic();        \
        clear_bit(nr, addr);            \
 } while (0)
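
The lock.h hunk shows why one barrier flavour suffices here: clear_bit_unlock() is literally a full fence composed with an unordered clear_bit(), which gives at least release semantics. In C11 the same unlock could be written as a single release RMW (a hypothetical equivalent, not kernel code):

    #include <stdatomic.h>

    /* Release ordering keeps everything done inside the critical
     * section from drifting past the clearing of the lock bit. */
    static inline void clear_bit_unlock_c11(int nr, atomic_ulong *addr)
    {
        atomic_fetch_and_explicit(addr, ~(1UL << nr), memory_order_release);
    }
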
 
index c40302f909ce00910260e2818dd712d658257a77..7cbf837a279c02f12baac4241a93e5786f3b3547 100644 (file)
@@ -278,7 +278,7 @@ static inline void get_bh(struct buffer_head *bh)
 
 static inline void put_bh(struct buffer_head *bh)
 {
-        smp_mb__before_atomic_dec();
+        smp_mb__before_atomic();
         atomic_dec(&bh->b_count);
 }
 
index 9f3c275e053efbb8a723917cc85407b528431ae1..ec274e0f4ed28d1834bb7b3115bd1010f92396a8 100644 (file)
@@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part)
 static inline void hd_struct_get(struct hd_struct *part)
 {
        atomic_inc(&part->ref);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 static inline int hd_struct_try_get(struct hd_struct *part)
index c7bfac1c4a7b8f6c82742b4d9f97c058131ae4fc..1571110432812839fb2f2bc479bc2d4fd3d223fd 100644 (file)
@@ -453,7 +453,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
-       smp_mb__before_clear_bit(); 
+       smp_mb__before_atomic();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
@@ -501,7 +501,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
        atomic_inc(&t->count);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 static inline void tasklet_disable(struct tasklet_struct *t)
@@ -513,13 +513,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 
 static inline void tasklet_enable(struct tasklet_struct *t)
 {
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&t->count);
 }
 
 static inline void tasklet_hi_enable(struct tasklet_struct *t)
 {
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&t->count);
 }
 
index 7ed3a3aa6604b7b8511ea709ea2c2312a1d38bb2..616415a4fee48bf73d1ea882249a1ef17c3e99f0 100644 (file)
@@ -493,7 +493,7 @@ static inline void napi_disable(struct napi_struct *n)
 static inline void napi_enable(struct napi_struct *n)
 {
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 
index 25f54c79f75772a9f133c585e17a2d8e4a59e8ac..010cde3b44cb025fac86df9fdd150c9052d6f05a 100644 (file)
@@ -2782,10 +2782,8 @@ static inline bool __must_check current_set_polling_and_test(void)
        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired by resched_task()
-        *
-        * XXX: assumes set/clear bit are identical barrier wise.
         */
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return unlikely(tif_need_resched());
 }
@@ -2803,7 +2801,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
         * Polling state must be visible before we test NEED_RESCHED,
         * paired by resched_task()
         */
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return unlikely(tif_need_resched());
 }
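
Both polling helpers are one half of a store-buffer (Dekker) pattern: the idle task stores its polling flag, issues a full barrier, and loads NEED_RESCHED, while resched_task() stores NEED_RESCHED and then loads the polling flag; with a full fence on each side at least one party observes the other's store, so neither the wakeup nor the poll exit can be lost. A sketch with hypothetical flags:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool polling, need_resched;

    static bool set_polling_and_test(void)     /* idle-task side */
    {
        atomic_store_explicit(&polling, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        return atomic_load_explicit(&need_resched, memory_order_relaxed);
    }

    static bool resched_sketch(void)           /* waker side; true: skip IPI */
    {
        atomic_store_explicit(&need_resched, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&polling, memory_order_relaxed);
    }
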
index 3a847de83fabd5757ddcee00910a6ab991f79a98..ad7dbe2cfecd840e8174fb3fe2e49e774c772d9b 100644 (file)
@@ -142,18 +142,18 @@ struct rpc_task_setup {
                                test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
 #define rpc_clear_running(t)   \
        do { \
-               smp_mb__before_clear_bit(); \
+               smp_mb__before_atomic(); \
                clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
-               smp_mb__after_clear_bit(); \
+               smp_mb__after_atomic(); \
        } while (0)
 
 #define RPC_IS_QUEUED(t)       test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_set_queued(t)      set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_clear_queued(t)    \
        do { \
-               smp_mb__before_clear_bit(); \
+               smp_mb__before_atomic(); \
                clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
-               smp_mb__after_clear_bit(); \
+               smp_mb__after_atomic(); \
        } while (0)
 
 #define RPC_IS_ACTIVATED(t)    test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
index 3e5efb2b236e2198847460cf2bd943b99b1b1cf0..3876f0f1dfd38115a1ac981d11f1aea641c03041 100644 (file)
@@ -379,9 +379,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_CONNECTING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static inline int xprt_connecting(struct rpc_xprt *xprt)
@@ -411,9 +411,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_binding(struct rpc_xprt *xprt)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_BINDING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
index 1e98b553042598e0d1d6a41337bec8194ee54a63..6f8ab7da27c43c47b82b25eb5198a82ca0d8487e 100644 (file)
@@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
         * pairs with task_work_add()->set_notify_resume() after
         * hlist_add_head(task->task_works);
         */
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        if (unlikely(current->task_works))
                task_work_run();
 }
index 5679d927562be7b0225c68468a498ded903bd36a..624a8a54806d4c877cabcbefec9dd975e1204922 100644 (file)
@@ -1204,7 +1204,7 @@ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&cp->refcnt);
 }
 void ip_vs_conn_put(struct ip_vs_conn *cp);
@@ -1408,7 +1408,7 @@ static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 
 static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 {
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&dest->refcnt);
 }
 
index 2956c8da16055a70c23bda77804b203abc9a6fe9..1adf62b39b96b496e56ca8484c6939429981277c 100644 (file)
@@ -534,7 +534,7 @@ return_normal:
                        kgdb_info[cpu].exception_state &=
                                ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
                        kgdb_info[cpu].enter_kgdb--;
-                       smp_mb__before_atomic_dec();
+                       smp_mb__before_atomic();
                        atomic_dec(&slaves_in_kgdb);
                        dbg_touch_watchdogs();
                        local_irq_restore(flags);
@@ -662,7 +662,7 @@ kgdb_restore:
        kgdb_info[cpu].exception_state &=
                ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
        kgdb_info[cpu].enter_kgdb--;
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&masters_in_kgdb);
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
index 5f589279e4626c47b75b8a1d193d089f24a2f7b9..b991ec05b8f931536b149fd717ec9c1a44dcdf83 100644 (file)
@@ -267,7 +267,7 @@ static inline void futex_get_mm(union futex_key *key)
         * get_futex_key() implies a full barrier. This is relied upon
         * as full barrier (B), see the ordering comment above.
         */
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 /*
@@ -280,7 +280,7 @@ static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
        /*
         * Full barrier (A), see the ordering comment above.
         */
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 #endif
 }
 
index 6b375af4958d1290c5c7a066903203802ee41d77..0ac67a5861c5c66697c3f705c790f1faf70fc348 100644 (file)
@@ -498,7 +498,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth)
 static void helper_lock(void)
 {
        atomic_inc(&running_helpers);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
 }
 
 static void helper_unlock(void)
index 0c47e300210ad61c3d79854426150fd5246f5750..88b4a1dcb58c7163e3249a4e156c4453d0e91004 100644 (file)
@@ -387,9 +387,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
        }
        rcu_prepare_for_idle(smp_processor_id());
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic_inc();  /* See above. */
+       smp_mb__before_atomic();  /* See above. */
        atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+       smp_mb__after_atomic();  /* Force ordering with next sojourn. */
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
        /*
@@ -507,10 +507,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
                               int user)
 {
-       smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+       smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic_inc();  /* See above. */
+       smp_mb__after_atomic();  /* See above. */
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
        rcu_cleanup_after_idle(smp_processor_id());
        trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
@@ -635,10 +635,10 @@ void rcu_nmi_enter(void)
            (atomic_read(&rdtp->dynticks) & 0x1))
                return;
        rdtp->dynticks_nmi_nesting++;
-       smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+       smp_mb__before_atomic();  /* Force delay from prior write. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic_inc();  /* See above. */
+       smp_mb__after_atomic();  /* See above. */
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
@@ -657,9 +657,9 @@ void rcu_nmi_exit(void)
            --rdtp->dynticks_nmi_nesting != 0)
                return;
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic_inc();  /* See above. */
+       smp_mb__before_atomic();  /* See above. */
        atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic_inc();  /* Force delay to next write. */
+       smp_mb__after_atomic();  /* Force delay to next write. */
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
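
The dynticks hunks above bracket each counter increment with a barrier on the side facing the RCU read-side critical sections: on entry to idle, prior reads must complete before the counter flips; on exit, the flip must precede later reads. Remote CPUs sample the counter to decide whether this CPU is in an extended quiescent state. A toy C11 version of the entry/exit pair (even-means-idle as in the kernel, everything else illustrative):

    #include <stdatomic.h>

    static atomic_int dynticks = 1;  /* odd = active, even = idle (toy) */

    static void eqs_enter(void)      /* CPU goes idle */
    {
        atomic_thread_fence(memory_order_seq_cst);  /* prior read side...   */
        atomic_fetch_add_explicit(&dynticks, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* ...ordered with flip */
    }

    static void eqs_exit(void)       /* CPU becomes active again */
    {
        atomic_thread_fence(memory_order_seq_cst);
        atomic_fetch_add_explicit(&dynticks, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* flip before later reads */
    }
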
 
@@ -2790,7 +2790,7 @@ void synchronize_sched_expedited(void)
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                        /* ensure test happens before caller kfree */
-                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone1);
                        return;
                }
@@ -2808,7 +2808,7 @@ void synchronize_sched_expedited(void)
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                        /* ensure test happens before caller kfree */
-                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone2);
                        return;
                }
@@ -2837,7 +2837,7 @@ void synchronize_sched_expedited(void)
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
                        /* ensure test happens before caller kfree */
-                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_done_lost);
                        break;
                }
index 962d1d589929e2b9c8350bd4432d96c4e31017f2..56db2f853e432a2e0c7f95014b8ddd51c8f1d1f5 100644 (file)
@@ -2523,9 +2523,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
        /* Record start of fully idle period. */
        j = jiffies;
        ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(&rdtp->dynticks_idle);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
 }
 
@@ -2590,9 +2590,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
        }
 
        /* Record end of idle period. */
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(&rdtp->dynticks_idle);
-       smp_mb__after_atomic_inc();
+       smp_mb__after_atomic();
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
 
        /*
index 8b836b376d9129760066326eabf5040f72b2e4f3..746bc9344969a6641b42c97d00056b886b816ee4 100644 (file)
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
                 * do a write memory barrier, and then update the count, to
                 * make sure the vector is visible when count is set.
                 */
-               smp_mb__before_atomic_inc();
+               smp_mb__before_atomic();
                atomic_inc(&(vec)->count);
                do_mb = 1;
        }
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
                 * the new priority vec.
                 */
                if (do_mb)
-                       smp_mb__after_atomic_inc();
+                       smp_mb__after_atomic();
 
                /*
                 * When removing from the vector, we decrement the counter first
                 * do a memory barrier and then clear the mask.
                 */
                atomic_dec(&(vec)->count);
-               smp_mb__after_atomic_inc();
+               smp_mb__after_atomic();
                cpumask_clear_cpu(cpu, vec->mask);
        }
 
index 7d50f794e24802b024393d0c287fbfb1256d0b17..0ffa20ae657b5510b20425ad17acbf8390dcac51 100644 (file)
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit);
  *
  * In order for this to function properly, as it uses waitqueue_active()
  * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
  * cases where bitflags are manipulated non-atomically under a lock, one
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
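
The comment this hunk touches states the contract precisely: the wake side must order its condition update before the lockless waitqueue_active() peek, and the wait side must enqueue itself before re-testing the condition, or a wakeup can slip through. A userspace C11 skeleton of just the ordering (queue management elided, names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool condition;
    static atomic_int  nr_waiters;

    static void waker(void)
    {
        atomic_store_explicit(&condition, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        if (atomic_load_explicit(&nr_waiters, memory_order_relaxed) > 0) {
            /* __wake_up_bit() analogue: walk the queue and wake */
        }
    }

    static bool prepare_to_wait(void)              /* true: go to sleep */
    {
        atomic_fetch_add_explicit(&nr_waiters, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return !atomic_load_explicit(&condition, memory_order_relaxed);
    }
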
index 09d9591b770806c5beed5a27b2cd37b29e68ce5c..1706cbbdf5f0381aaf81f21f6bc47b1133e2746b 100644 (file)
@@ -557,7 +557,7 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
 }
index a82fbe4c9e8e1c1d5a3eed5e2649ec87a7bfd16d..c73535c914cc3fe7bd71a566c1d51a823bace480 100644 (file)
@@ -740,7 +740,7 @@ void unlock_page(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        clear_bit_unlock(PG_locked, &page->flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
@@ -757,7 +757,7 @@ void end_page_writeback(struct page *page)
        if (!test_clear_page_writeback(page))
                BUG();
 
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        wake_up_page(page, PG_writeback);
 }
 EXPORT_SYMBOL(end_page_writeback);
index 8c93267ce96910a48f9eb9e5bb8e295470656edf..c4e09846d1dea258b7ca8e02a4d9510b9d8d0ba1 100644 (file)
@@ -252,7 +252,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
         * we need to ensure there's a memory barrier after it. The bit
         * *must* be set before we do the atomic_inc() on pvcc->inflight.
         * There's no smp_mb__after_set_bit(), so it's this or abuse
-        * smp_mb__after_clear_bit().
+        * smp_mb__after_atomic().
         */
        test_and_set_bit(BLOCKED, &pvcc->blocked);
 
index 49774912cb01f23ef6f85cb26f8613f538edec94..74014420b3c7daff29c2ebbfb8fa228efac3aa78 100644 (file)
@@ -45,7 +45,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
                return;
 
        clear_bit(HCI_INQUIRY, &hdev->flags);
-       smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+       smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
        hci_conn_check_pending(hdev);
@@ -1768,7 +1768,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
                return;
 
-       smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+       smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
index 5b3042e69f85646961a910977e041fc16a94cc78..e14f1cba591abf0db2f196b1332253f0fa22368c 100644 (file)
@@ -1326,7 +1326,7 @@ static int __dev_close_many(struct list_head *head)
                 * dev->stop() will invoke napi_disable() on all of it's
                 * napi_struct instances on this device.
                 */
-               smp_mb__after_clear_bit(); /* Commit netif_running(). */
+               smp_mb__after_atomic(); /* Commit netif_running(). */
        }
 
        dev_deactivate_many(head);
@@ -3343,7 +3343,7 @@ static void net_tx_action(struct softirq_action *h)
 
                        root_lock = qdisc_lock(q);
                        if (spin_trylock(root_lock)) {
-                               smp_mb__before_clear_bit();
+                               smp_mb__before_atomic();
                                clear_bit(__QDISC_STATE_SCHED,
                                          &q->state);
                                qdisc_run(q);
@@ -3353,7 +3353,7 @@ static void net_tx_action(struct softirq_action *h)
                                              &q->state)) {
                                        __netif_reschedule(q);
                                } else {
-                                       smp_mb__before_clear_bit();
+                                       smp_mb__before_atomic();
                                        clear_bit(__QDISC_STATE_SCHED,
                                                  &q->state);
                                }
@@ -4244,7 +4244,7 @@ void __napi_complete(struct napi_struct *n)
        BUG_ON(n->gro_list);
 
        list_del(&n->poll_list);
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 EXPORT_SYMBOL(__napi_complete);
index 9c3a839322baccbf69079747a4588079ce46d029..bd0767e6b2b31747a08d02a1f45f7dd733be9a4f 100644 (file)
@@ -147,7 +147,7 @@ static void linkwatch_do_dev(struct net_device *dev)
         * Make sure the above read is complete since it can be
         * rewritten as soon as we clear the bit below.
         */
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
 
        /* We are about to handle this device,
         * so new events can be accepted
index 48f4244651125fb4ef7bcfda155137f287b07c77..56cd458a1b8c461f060b7018ed6843124d75a0ca 100644 (file)
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
 void inet_putpeer(struct inet_peer *p)
 {
        p->dtime = (__u32)jiffies;
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
index 025e25093984bacaca7a5cdaa26074d436f29cf1..366cf06587b8475cae8f0a08f1a89a467affb388 100644 (file)
@@ -1930,10 +1930,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                        /* It is possible TX completion already happened
                         * before we set TSQ_THROTTLED, so we must
                         * test again the condition.
-                        * We abuse smp_mb__after_clear_bit() because
-                        * there is no smp_mb__after_set_bit() yet
                         */
-                       smp_mb__after_clear_bit();
+                       smp_mb__after_atomic();
                        if (atomic_read(&sk->sk_wmem_alloc) > limit)
                                break;
                }
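
The deleted comment is no longer needed: after the rename there is nothing to "abuse", since smp_mb__after_atomic() is defined to follow any atomic op, set_bit() included. The idiom itself is the usual race closer: set TSQ_THROTTLED, full barrier, then re-test the free-space condition in case a TX completion fired in the window. Sketch (names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool throttled;
    static atomic_long wmem_alloc;

    static bool maybe_throttle(long limit)  /* true: stop transmitting */
    {
        atomic_store_explicit(&throttled, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        /* a completion may have run before the flag was set: test again */
        return atomic_load_explicit(&wmem_alloc, memory_order_relaxed) > limit;
    }
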
index 75421f2ba8bee3d3bcf6b02e8feb6858052d8b85..1f4f954c4b47c7ecf763290659e900277ad5cb89 100644 (file)
@@ -914,7 +914,7 @@ void nf_conntrack_free(struct nf_conn *ct)
        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
index b7ebe23cdedfb5f31cfafab415b54b3db3f0c30b..d67de453c35aae7257bd29e9791adc2595f6a7d7 100644 (file)
@@ -598,7 +598,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
 {
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
 }
@@ -606,7 +606,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 {
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return atomic64_read(&ic->i_ack_next);
 }
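
rds_ib_set_ack()/rds_ib_get_ack() form a small message-passing pair: the producer stores the sequence number before setting IB_ACK_REQUESTED, and the consumer clears the flag before reading the value, so an ack posted concurrently either lands in the value read or leaves the flag set for the next pass. In C11 this is the textbook fence-based handoff (a sketch, not the RDS code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t ack_next;
    static atomic_bool      ack_requested;

    static void set_ack(uint64_t seq)
    {
        atomic_store_explicit(&ack_next, seq, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__before_atomic() */
        atomic_store_explicit(&ack_requested, true, memory_order_relaxed);
    }

    static uint64_t get_ack(void)
    {
        atomic_store_explicit(&ack_requested, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        return atomic_load_explicit(&ack_next, memory_order_relaxed);
    }
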
index 45033358358e7ec5cb6a49448c4ba4965cf8d50c..aa8bf6786008afdb6de72221d4678628f93469a0 100644 (file)
@@ -429,7 +429,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
 {
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
 }
@@ -437,7 +437,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
 static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
 {
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        return atomic64_read(&ic->i_ack_next);
 }
index a82fb660ec009009b640ef66b1b1840432abd1f3..23718160d71ec9fdb919f95f6c7d73fd61a2ffad 100644 (file)
@@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn)
 static void release_in_xmit(struct rds_connection *conn)
 {
        clear_bit(RDS_IN_XMIT, &conn->c_flags);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        /*
         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
@@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 
        /* order flag updates with spin locks */
        if (!list_empty(&list))
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
 
        spin_unlock_irqrestore(&conn->c_lock, flags);
 
@@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
        }
 
        /* order flag updates with the rs lock */
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        spin_unlock_irqrestore(&rs->rs_lock, flags);
 
index 81cf5a4c5e40c3c50b98c6694edd265a37e13b51..53b17ca0dff5a5618d92b2f6aeb876284707959b 100644 (file)
@@ -93,7 +93,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                rm->m_ack_seq = tc->t_last_sent_nxt +
                                sizeof(struct rds_header) +
                                be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
                tc->t_last_expected_una = rm->m_ack_seq + 1;
 
index 5285ead196c06de33ed55df22404f78c8302bf30..247e973544bfe3d552edbb40dcb8505ab252161d 100644 (file)
@@ -296,7 +296,7 @@ static void
 rpcauth_unhash_cred_locked(struct rpc_cred *cred)
 {
        hlist_del_rcu(&cred->cr_hash);
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
 }
 
index 36e431ee1c902ef1c6ed777331cf97ceda0bdee4..b6e440baccc3733f7b8963ed7ab6fddc72fd0c4c 100644 (file)
@@ -143,7 +143,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
        gss_get_ctx(ctx);
        rcu_assign_pointer(gss_cred->gc_ctx, ctx);
        set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
 }
 
index 3513d559bc453d1465fda21d8860db601506bc06..9761a0da964d81d85a322086ea6c89a1e75b9124 100644 (file)
@@ -244,10 +244,10 @@ void xprt_free_bc_request(struct rpc_rqst *req)
        dprintk("RPC:       free backchannel req=%p\n", req);
 
        req->rq_connect_cookie = xprt->connect_cookie - 1;
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        if (!xprt_need_to_requeue(xprt)) {
                /*
index d173f79947c6bc6ceb0134d4c9e10fb3d767e92b..89d051de6b3e8da4fb439e9e2380cba71d5c716e 100644 (file)
@@ -230,9 +230,9 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
 {
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(XPRT_LOCKED, &xprt->state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
        } else
                queue_work(rpciod_workqueue, &xprt->task_cleanup);
 }
index 25a3dcf15cae97ff54f315412848c269acef3b01..402a7e9a16b7cdf05aa5d23462b3113b1b96c4d3 100644 (file)
@@ -893,11 +893,11 @@ static void xs_close(struct rpc_xprt *xprt)
        xs_reset_transport(transport);
        xprt->reestablish_timeout = 0;
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        xprt_disconnect_done(xprt);
 }
 
@@ -1497,12 +1497,12 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
 
 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
        clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
@@ -1556,10 +1556,10 @@ static void xs_tcp_state_change(struct sock *sk)
                xprt->connect_cookie++;
                xprt->reestablish_timeout = 0;
                set_bit(XPRT_CLOSING, &xprt->state);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
                clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
                break;
        case TCP_CLOSE_WAIT:
@@ -1578,9 +1578,9 @@ static void xs_tcp_state_change(struct sock *sk)
        case TCP_LAST_ACK:
                set_bit(XPRT_CLOSING, &xprt->state);
                xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                break;
        case TCP_CLOSE:
                xs_tcp_cancel_linger_timeout(xprt);
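
Note how xprtsock batches several clear_bit() calls inside one before/after pair instead of paying a fence per bit; the new names make it clearer that the barriers belong to the surrounding sequence, not to any particular bit op. The same shape in C11 (bit numbers hypothetical):

    #include <stdatomic.h>

    enum { XPRT_CONNECTED, XPRT_CLOSE_WAIT, XPRT_CLOSING };

    static atomic_ulong xprt_state;

    static void clear_flag(int nr)
    {
        atomic_fetch_and_explicit(&xprt_state, ~(1UL << nr),
                                  memory_order_relaxed);
    }

    static void reset_connection_flags(void)
    {
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__before_atomic() */
        clear_flag(XPRT_CONNECTED);
        clear_flag(XPRT_CLOSE_WAIT);
        clear_flag(XPRT_CLOSING);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic()  */
    }
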
index bb7e8ba821f44014d65f3669e017b8c9cca54f53..749f80c21e220b4222a57c376499a3ad18f12ad9 100644 (file)
@@ -1207,7 +1207,7 @@ restart:
        sk->sk_state    = TCP_ESTABLISHED;
        sock_hold(newsk);
 
-       smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
+       smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
        unix_peer(sk)   = newsk;
 
        unix_state_unlock(sk);
index 8546711d12f9275c9dba94bbfc1a59476e1c041a..70951fd9b354368e80fa59eeb9f80b70f773c635 100644 (file)
@@ -443,7 +443,7 @@ static int snd_bt87x_pcm_open(struct snd_pcm_substream *substream)
 
 _error:
        clear_bit(0, &chip->opened);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        return err;
 }
 
@@ -458,7 +458,7 @@ static int snd_bt87x_close(struct snd_pcm_substream *substream)
 
        chip->substream = NULL;
        clear_bit(0, &chip->opened);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        return 0;
 }