]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'moduleh/for-sfr'
authorStephen Rothwell <sfr@canb.auug.org.au>
Thu, 29 Sep 2011 05:56:07 +0000 (15:56 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Thu, 29 Sep 2011 05:57:44 +0000 (15:57 +1000)
Conflicts:
arch/arm/mach-bcmring/mm.c
drivers/media/dvb/frontends/dibx000_common.c
drivers/misc/altera-stapl/altera.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/scsi/libfc/fc_lport.c
drivers/staging/iio/accel/adis16220_core.c
include/linux/dmaengine.h
sound/soc/soc-io.c

573 files changed:
1  2 
arch/arm/common/scoop.c
arch/arm/kernel/armksyms.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/mach-bcmring/mm.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-imx/mach-mx31lilly.c
arch/arm/mach-imx/mach-mx31lite.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-ixp2000/core.c
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-sx1.c
arch/arm/mach-omap1/board-voiceblue.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/voltage.c
arch/arm/mach-pxa/colibri-pxa270.c
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/poodle.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/trizeps4.c
arch/arm/mach-s3c2410/mach-h1940.c
arch/arm/mach-sa1100/jornada720.c
arch/arm/mm/init.c
arch/arm/plat-samsung/dev-ts.c
arch/arm/plat-samsung/platformdata.c
arch/blackfin/mach-bf518/boards/ezbrd.c
arch/blackfin/mach-bf527/boards/ad7160eval.c
arch/blackfin/mach-bf527/boards/cm_bf527.c
arch/blackfin/mach-bf527/boards/ezbrd.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf527/boards/tll6527m.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/dnp5370.c
arch/blackfin/mach-bf537/boards/pnav10.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/mips/bcm47xx/gpio.c
arch/mips/bcm47xx/setup.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/process.c
arch/mips/kernel/traps.c
arch/mips/kernel/vpe.c
arch/parisc/kernel/pci-dma.c
arch/powerpc/include/asm/machdep.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/vio.c
arch/powerpc/kvm/44x.c
arch/powerpc/kvm/book3s_exports.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_context_hash64.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/tlb_nohash.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/ps3/system-bus.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/iommu.c
arch/sparc/kernel/process_64.c
arch/x86/crypto/aes_glue.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/irq.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/rtc.c
arch/x86/kernel/sys_x86_64.c
arch/x86/lguest/boot.c
arch/x86/platform/mrst/vrtc.c
block/bsg-lib.c
block/ioctl.c
drivers/ata/libata-eh.c
drivers/base/power/common.c
drivers/base/power/main.c
drivers/base/power/qos.c
drivers/base/power/runtime.c
drivers/base/power/wakeup.c
drivers/base/regmap/regcache.c
drivers/bcma/core.c
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_pci.c
drivers/bcma/main.c
drivers/block/aoe/aoeblk.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/ps3vram.c
drivers/bluetooth/btwilink.c
drivers/char/virtio_console.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
drivers/dca/dca-core.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/gpio/gpio-ep93xx.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-mxs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/hid/hid-axff.c
drivers/hid/hid-debug.c
drivers/hid/hid-picolcd.c
drivers/hid/hid-sjoy.c
drivers/hid/usbhid/hid-quirks.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/ucma.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/input/input-mt.c
drivers/input/input-polldev.c
drivers/isdn/i4l/isdn_tty.c
drivers/leds/leds-asic3.c
drivers/md/dm-raid.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/saa7146_core.c
drivers/media/common/saa7146_fops.c
drivers/media/common/saa7146_hlp.c
drivers/media/common/saa7146_video.c
drivers/media/dvb/frontends/dibx000_common.c
drivers/media/radio/radio-wl1273.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
drivers/media/rc/rc-main.c
drivers/media/video/adp1653.c
drivers/media/video/cx25840/cx25840-ir.c
drivers/media/video/hexium_gemini.c
drivers/media/video/hexium_orion.c
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m111.c
drivers/media/video/mxb.c
drivers/media/video/noon010pc30.c
drivers/media/video/sr030pc30.c
drivers/media/video/tvp7002.c
drivers/media/video/v4l2-ctrls.c
drivers/media/video/v4l2-device.c
drivers/media/video/v4l2-subdev.c
drivers/mfd/ab3100-core.c
drivers/mfd/asic3.c
drivers/mfd/max8997.c
drivers/mfd/twl-core.c
drivers/mfd/twl6030-irq.c
drivers/mfd/wm8400-core.c
drivers/mmc/card/mmc_test.c
drivers/mmc/core/debugfs.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/quirks.c
drivers/mmc/core/sd.c
drivers/mmc/core/sd_ops.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pxav2.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/ar7part.c
drivers/mtd/cmdlinepart.c
drivers/mtd/mtdsuper.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/cmx270_nand.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/sm_common.c
drivers/mtd/onenand/onenand_bbt.c
drivers/mtd/redboot.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/usb/lg-vl600.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/iwlegacy/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl-pci.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas/if_usb.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas/rx.c
drivers/net/wireless/libertas/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/debug.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/wl12xx/boot.c
drivers/nfc/nfcwilink.c
drivers/parisc/sba_iommu.c
drivers/pci/quirks.c
drivers/power/max17042_battery.c
drivers/power/max8903_charger.c
drivers/power/max8997_charger.c
drivers/power/max8998_charger.c
drivers/power/power_supply_sysfs.c
drivers/regulator/88pm8607.c
drivers/regulator/core.c
drivers/regulator/mc13783-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/s390/char/vmur.c
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_setup.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_qdio.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_host_smp.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/spi/spi-dw.c
drivers/ssb/main.c
drivers/staging/brcm80211/brcmfmac/bcmsdh.c
drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/staging/brcm80211/brcmfmac/dhd_linux.c
drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
drivers/staging/brcm80211/brcmsmac/mac80211_if.c
drivers/staging/brcm80211/brcmutil/utils.c
drivers/staging/brcm80211/brcmutil/wifi.c
drivers/staging/dt3155v4l/dt3155v4l.c
drivers/staging/gma500/psb_drv.c
drivers/staging/iio/accel/adis16201_ring.c
drivers/staging/iio/accel/adis16201_trigger.c
drivers/staging/iio/accel/adis16203_ring.c
drivers/staging/iio/accel/adis16203_trigger.c
drivers/staging/iio/accel/adis16204_ring.c
drivers/staging/iio/accel/adis16204_trigger.c
drivers/staging/iio/accel/adis16209_ring.c
drivers/staging/iio/accel/adis16209_trigger.c
drivers/staging/iio/accel/adis16240_ring.c
drivers/staging/iio/accel/adis16240_trigger.c
drivers/staging/iio/accel/lis3l02dq_ring.c
drivers/staging/iio/adc/ad7280a.c
drivers/staging/iio/adc/ad7793.c
drivers/staging/iio/gyro/adis16060_core.c
drivers/staging/iio/gyro/adis16260_ring.c
drivers/staging/iio/gyro/adis16260_trigger.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/iio/imu/adis16400_ring.c
drivers/staging/iio/imu/adis16400_trigger.c
drivers/staging/iio/industrialio-ring.c
drivers/staging/iio/meter/ade7758_ring.c
drivers/staging/iio/meter/ade7758_trigger.c
drivers/staging/rts5139/rts51x_scsi.c
drivers/staging/winbond/wbusb.c
drivers/target/target_core_alua.c
drivers/target/target_core_fabric_lib.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_tmr.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/serial/jsm/jsm_driver.c
drivers/uio/uio_pdrv_genirq.c
drivers/usb/core/driver.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/host/xhci-hub.c
drivers/usb/wusbcore/wa-hc.c
drivers/video/atmel_lcdfb.c
drivers/video/carminefb.c
drivers/video/mb862xx/mb862xx-i2c.c
drivers/video/mb862xx/mb862xxfbdrv.c
drivers/video/msm/mdp.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dpi.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/sdi.c
drivers/video/sh_mobile_lcdcfb.c
drivers/xen/balloon.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/cifs/connect.c
fs/exofs/ore.c
fs/exofs/super.c
fs/gfs2/ops_fstype.c
fs/logfs/super.c
fs/nfs/nfs4filelayout.c
fs/nfs/write.c
fs/nfsd/nfsctl.c
fs/ocfs2/cluster/tcp.c
include/linux/bcma/bcma.h
include/linux/blkdev.h
include/linux/device.h
include/linux/dmaengine.h
include/linux/hid.h
include/linux/irq.h
include/linux/mtd/mtd.h
include/linux/pm_runtime.h
include/linux/regmap.h
include/linux/regulator/driver.h
include/linux/serio.h
include/linux/ssb/ssb.h
include/linux/usb.h
include/media/saa7146.h
include/net/bluetooth/hci_core.h
include/net/lib80211.h
include/net/sock.h
kernel/async.c
kernel/crash_dump.c
kernel/cred.c
kernel/events/core.c
kernel/fork.c
kernel/futex.c
kernel/kprobes.c
kernel/latencytop.c
kernel/lockdep.c
kernel/pid.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/qos.c
kernel/power/suspend.c
kernel/ptrace.c
kernel/rcupdate.c
kernel/rcutiny.c
kernel/rcutorture.c
kernel/rcutree.c
kernel/rcutree_trace.c
kernel/rtmutex-debug.c
kernel/rtmutex.c
kernel/sched.c
kernel/semaphore.c
kernel/signal.c
kernel/softirq.c
kernel/sys.c
kernel/time.c
kernel/watchdog.c
kernel/workqueue.c
lib/dma-debug.c
mm/memcontrol.c
mm/swapfile.c
net/802/garp.c
net/802/stp.c
net/bluetooth/hci_sysfs.c
net/bluetooth/mgmt.c
net/caif/caif_dev.c
net/ceph/messenger.c
net/core/dev_addr_lists.c
net/core/fib_rules.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/user_dma.c
net/ipv4/fib_trie.c
net/ipv4/ipmr.c
net/ipv4/proc.c
net/ipv6/addrconf.c
net/ipv6/exthdrs.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6mr.c
net/ipv6/raw.c
net/ipv6/route.c
net/irda/qos.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/x_tables.c
net/nfc/nci/core.c
net/nfc/rawsock.c
net/phonet/socket.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/tipc/socket.c
net/wireless/mesh.c
net/wireless/reg.c
net/wireless/sme.c
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-sme.c
security/selinux/hooks.c
security/selinux/netlink.c
sound/arm/pxa2xx-ac97-lib.c
sound/core/control.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/drivers/aloop.c
sound/drivers/ml403-ac97cr.c
sound/drivers/mpu401/mpu401.c
sound/drivers/mpu401/mpu401_uart.c
sound/drivers/mtpav.c
sound/drivers/serial-u16550.c
sound/isa/ad1816a/ad1816a.c
sound/isa/als100.c
sound/isa/azt2320.c
sound/isa/cmi8330.c
sound/isa/cs423x/cs4231.c
sound/isa/cs423x/cs4236.c
sound/isa/es1688/es1688.c
sound/isa/es1688/es1688_lib.c
sound/isa/es18xx.c
sound/isa/gus/gus_main.c
sound/isa/gus/gusextreme.c
sound/isa/gus/gusmax.c
sound/isa/gus/interwave.c
sound/isa/opl3sa2.c
sound/isa/opti9xx/miro.c
sound/isa/opti9xx/opti92x-ad1848.c
sound/isa/sb/sb16.c
sound/isa/sb/sb_common.c
sound/isa/sscape.c
sound/isa/wavefront/wavefront.c
sound/isa/wss/wss_lib.c
sound/mips/au1x00.c
sound/pci/als4000.c
sound/pci/azt3328.c
sound/pci/cmipci.c
sound/pci/es1938.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_hwdep.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/ice1712/ice1712.c
sound/pci/maestro3.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/riptide/riptide.c
sound/pci/rme9652/hdspm.c
sound/pci/sis7019.c
sound/pci/sonicvibes.c
sound/pci/trident/trident.c
sound/pci/via82xx.c
sound/pci/ymfpci/ymfpci.c
sound/pci/ymfpci/ymfpci_main.c
sound/ppc/snd_ps3.c
sound/soc/codecs/sn95031.c
sound/soc/codecs/wl1273.c
sound/soc/ep93xx/edb93xx.c
sound/soc/ep93xx/snappercl15.c
sound/soc/mid-x86/mfld_machine.c
sound/soc/mid-x86/sst_platform.c
sound/soc/omap/sdp4430.c
sound/soc/samsung/ac97.c
sound/soc/samsung/dma.c
sound/soc/samsung/jive_wm8750.c
sound/soc/samsung/s3c2412-i2s.c
sound/soc/samsung/s3c24xx-i2s.c
sound/soc/samsung/s3c24xx_uda134x.c
sound/soc/samsung/smartq_wm8987.c
sound/soc/samsung/smdk_wm8580.c
sound/soc/samsung/smdk_wm8580pcm.c
sound/soc/samsung/speyside.c
sound/soc/samsung/speyside_wm8962.c
sound/soc/sh/fsi.c
sound/soc/soc-cache.c
sound/soc/soc-io.c
sound/soc/soc-jack.c
sound/usb/6fire/firmware.c
sound/usb/card.c
sound/usb/midi.c
virt/kvm/iommu.c

index 1cde34a080d7e688efbf019aae2e55f8e20895c2,f749e60639eeac7fbad31386ebb405f4188eb3cb..3229323eeb5700ad624a22bd5fb86bf2bb2584f1
@@@ -11,8 -11,8 +11,9 @@@
   *
   */
  
+ #include <linux/export.h>
  #include <linux/device.h>
 +#include <linux/gpio.h>
  #include <linux/string.h>
  #include <linux/slab.h>
  #include <linux/platform_device.h>
Simple merge
Simple merge
Simple merge
index 8616876abb9f92df76fd4a53de0bdc01aefd494b,293b6d3e0d1dbc4824337e820c0f9ff50ac22fee..1adec78ec940ef7a620a23e88752a1f6c0c2d642
@@@ -13,7 -13,7 +13,8 @@@
  *****************************************************************************/
  
  #include <linux/platform_device.h>
 +#include <linux/dma-mapping.h>
+ #include <asm/page.h>
  #include <asm/mach/map.h>
  
  #include <mach/hardware.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 64070ac1e761048caf85bf5fb4ebad2e06b85197,e964cfd3a3d0108fd5967a2d337fd5842eef258f..1f8fdf736e630976bc97fc781d58bfbf85704179
  
  #include <linux/delay.h>
  #include <linux/io.h>
 -#include <linux/clk.h>
  #include <linux/err.h>
+ #include <linux/export.h>
  #include <linux/debugfs.h>
  #include <linux/slab.h>
 +#include <linux/clk.h>
  
  #include <plat/common.h>
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 17c3d14d7c4900d5e06c5a0549a27de5db86f06e,0eda3808ee9d444fb667dff87285214154baa1bb..6084dc24b18c0ed20b315dd4522520c833eb15f0
   */
  
  #include <linux/types.h>
+ #include <linux/export.h>
  #include <linux/ssb/ssb.h>
  #include <linux/ssb/ssb_embedded.h>
 +#include <linux/bcma/bcma_soc.h>
  #include <asm/bootinfo.h>
  #include <asm/reboot.h>
  #include <asm/time.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index b0b6950cc8c8850d68366b869ca39e4250827013,bdce3eeeaa37633ead43d56065892cfaa4a53c58..8efcf42a9d7e318b4cddb93a820cd01d1bd01f4e
@@@ -3,8 -3,8 +3,9 @@@
   *
   */
  
+ #include <linux/module.h>
  #include <crypto/aes.h>
 +#include <asm/aes.h>
  
  asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
  asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
index 13c6ec81254582e69eae8b6c69b703370b25fb18,b070bde0582509896d238069963297db40279fb3..ef2e3462702d147375f85d29a04758d9bc4966e1
@@@ -1,6 -1,6 +1,7 @@@
+ #include <linux/export.h>
  #include <linux/init.h>
  #include <linux/bitops.h>
 +#include <linux/elf.h>
  #include <linux/mm.h>
  
  #include <linux/io.h>
index 63aad2742d8a20dddfc867ce7c24e7b3f393ddd1,0dc80178b16c4ad37603b7ee74b053f1527ad2ef..c9cda961275c9626ff6c054703cb872eb9f65372
@@@ -36,7 -36,9 +36,8 @@@
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/debugfs.h>
 -#include <linux/edac_mce.h>
  #include <linux/irq_work.h>
+ #include <linux/export.h>
  
  #include <asm/processor.h>
  #include <asm/mce.h>
Simple merge
Simple merge
Simple merge
index fe7d2dac7fa3e250bc0b01ac2c8d7c5007bd89f6,ff14a5044ce6ec66366f95acbffef82c732d23c5..051489082d591928e3e212ebc83ece12c93f50e2
@@@ -14,6 -14,6 +14,7 @@@
  #include <linux/personality.h>
  #include <linux/random.h>
  #include <linux/uaccess.h>
++#include <linux/elf.h>
  
  #include <asm/ia32.h>
  #include <asm/syscalls.h>
Simple merge
Simple merge
diff --cc block/bsg-lib.c
index 6690e6e41037ec305e7b5cd606f96c59fb29fe17,6690e6e41037ec305e7b5cd606f96c59fb29fe17..7ad49c88f6b197a04c66e05aab9facacd2781af4
@@@ -25,7 -25,7 +25,7 @@@
  #include <linux/delay.h>
  #include <linux/scatterlist.h>
  #include <linux/bsg-lib.h>
--#include <linux/module.h>
++#include <linux/export.h>
  #include <scsi/scsi_cmnd.h>
  
  /**
diff --cc block/ioctl.c
Simple merge
Simple merge
index 29820c3961823bdc6dbfe5d6061f1100d4c21161,0000000000000000000000000000000000000000..4af7c1cbf909b61d576bc0900df85983d2eedf9d
mode 100644,000000..100644
--- /dev/null
@@@ -1,86 -1,0 +1,86 @@@
- #include <linux/module.h>
 +/*
 + * drivers/base/power/common.c - Common device power management code.
 + *
 + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 + *
 + * This file is released under the GPLv2.
 + */
 +
 +#include <linux/init.h>
 +#include <linux/kernel.h>
++#include <linux/export.h>
 +#include <linux/slab.h>
 +#include <linux/pm_clock.h>
 +
 +/**
 + * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
 + * @dev: Device to handle.
 + *
 + * If power.subsys_data is NULL, point it to a new object, otherwise increment
 + * its reference counter.  Return 1 if a new object has been created, otherwise
 + * return 0 or error code.
 + */
 +int dev_pm_get_subsys_data(struct device *dev)
 +{
 +      struct pm_subsys_data *psd;
 +      int ret = 0;
 +
 +      psd = kzalloc(sizeof(*psd), GFP_KERNEL);
 +      if (!psd)
 +              return -ENOMEM;
 +
 +      spin_lock_irq(&dev->power.lock);
 +
 +      if (dev->power.subsys_data) {
 +              dev->power.subsys_data->refcount++;
 +      } else {
 +              spin_lock_init(&psd->lock);
 +              psd->refcount = 1;
 +              dev->power.subsys_data = psd;
 +              pm_clk_init(dev);
 +              psd = NULL;
 +              ret = 1;
 +      }
 +
 +      spin_unlock_irq(&dev->power.lock);
 +
 +      /* kfree() verifies that its argument is nonzero. */
 +      kfree(psd);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 +
 +/**
 + * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
 + * @dev: Device to handle.
 + *
 + * If the reference counter of power.subsys_data is zero after dropping the
 + * reference, power.subsys_data is removed.  Return 1 if that happens or 0
 + * otherwise.
 + */
 +int dev_pm_put_subsys_data(struct device *dev)
 +{
 +      struct pm_subsys_data *psd;
 +      int ret = 0;
 +
 +      spin_lock_irq(&dev->power.lock);
 +
 +      psd = dev_to_psd(dev);
 +      if (!psd) {
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (--psd->refcount == 0) {
 +              dev->power.subsys_data = NULL;
 +              kfree(psd);
 +              ret = 1;
 +      }
 +
 + out:
 +      spin_unlock_irq(&dev->power.lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
Simple merge
index e6296b7b388b8f6dd4beed06ff45f8d31bdf414f,0000000000000000000000000000000000000000..44c65d3fbf8eec381b4172e9811f6ad18258bcc9
mode 100644,000000..100644
--- /dev/null
@@@ -1,424 -1,0 +1,425 @@@
 +/*
 + * Devices PM QoS constraints management
 + *
 + * Copyright (C) 2011 Texas Instruments, Inc.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + *
 + *
 + * This module exposes the interface to kernel space for specifying
 + * per-device PM QoS dependencies. It provides infrastructure for registration
 + * of:
 + *
 + * Dependents on a QoS value : register requests
 + * Watchers of QoS value : get notified when target QoS value changes
 + *
 + * This QoS design is best effort based. Dependents register their QoS needs.
 + * Watchers register to keep track of the current QoS needs of the system.
 + * Watchers can register different types of notification callbacks:
 + *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
 + *    The notification chain data is stored in the per-device constraint
 + *    data struct.
 + *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
 + *    API. The notification chain data is stored in a static variable.
 + *
 + * Note about the per-device constraint data struct allocation:
 + * . The per-device constraints data struct ptr is tored into the device
 + *    dev_pm_info.
 + * . To minimize the data usage by the per-device constraints, the data struct
 + *   is only allocated at the first call to dev_pm_qos_add_request.
 + * . The data is later free'd when the device is removed from the system.
 + *  . A global mutex protects the constraints users from the data being
 + *     allocated and free'd.
 + */
 +
 +#include <linux/pm_qos.h>
 +#include <linux/spinlock.h>
 +#include <linux/slab.h>
 +#include <linux/device.h>
 +#include <linux/mutex.h>
++#include <linux/export.h>
 +
 +
 +static DEFINE_MUTEX(dev_pm_qos_mtx);
 +
 +static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 +
 +/**
 + * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 + * @dev: Device to get the PM QoS constraint value for.
 + */
 +s32 dev_pm_qos_read_value(struct device *dev)
 +{
 +      struct pm_qos_constraints *c;
 +      unsigned long flags;
 +      s32 ret = 0;
 +
 +      spin_lock_irqsave(&dev->power.lock, flags);
 +
 +      c = dev->power.constraints;
 +      if (c)
 +              ret = pm_qos_read_value(c);
 +
 +      spin_unlock_irqrestore(&dev->power.lock, flags);
 +
 +      return ret;
 +}
 +
 +/*
 + * apply_constraint
 + * @req: constraint request to apply
 + * @action: action to perform add/update/remove, of type enum pm_qos_req_action
 + * @value: defines the qos request
 + *
 + * Internal function to update the constraints list using the PM QoS core
 + * code and if needed call the per-device and the global notification
 + * callbacks
 + */
 +static int apply_constraint(struct dev_pm_qos_request *req,
 +                          enum pm_qos_req_action action, int value)
 +{
 +      int ret, curr_value;
 +
 +      ret = pm_qos_update_target(req->dev->power.constraints,
 +                                 &req->node, action, value);
 +
 +      if (ret) {
 +              /* Call the global callbacks if needed */
 +              curr_value = pm_qos_read_value(req->dev->power.constraints);
 +              blocking_notifier_call_chain(&dev_pm_notifiers,
 +                                           (unsigned long)curr_value,
 +                                           req);
 +      }
 +
 +      return ret;
 +}
 +
 +/*
 + * dev_pm_qos_constraints_allocate
 + * @dev: device to allocate data for
 + *
 + * Called at the first call to add_request, for constraint data allocation
 + * Must be called with the dev_pm_qos_mtx mutex held
 + */
 +static int dev_pm_qos_constraints_allocate(struct device *dev)
 +{
 +      struct pm_qos_constraints *c;
 +      struct blocking_notifier_head *n;
 +
 +      c = kzalloc(sizeof(*c), GFP_KERNEL);
 +      if (!c)
 +              return -ENOMEM;
 +
 +      n = kzalloc(sizeof(*n), GFP_KERNEL);
 +      if (!n) {
 +              kfree(c);
 +              return -ENOMEM;
 +      }
 +      BLOCKING_INIT_NOTIFIER_HEAD(n);
 +
 +      plist_head_init(&c->list);
 +      c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
 +      c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
 +      c->type = PM_QOS_MIN;
 +      c->notifiers = n;
 +
 +      spin_lock_irq(&dev->power.lock);
 +      dev->power.constraints = c;
 +      spin_unlock_irq(&dev->power.lock);
 +
 +      return 0;
 +}
 +
 +static void __dev_pm_qos_constraints_init(struct device *dev)
 +{
 +      spin_lock_irq(&dev->power.lock);
 +      dev->power.constraints = NULL;
 +      spin_unlock_irq(&dev->power.lock);
 +}
 +
 +/**
 + * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
 + * @dev: target device
 + *
 + * Called from the device PM subsystem at device insertion under
 + * device_pm_lock().
 + */
 +void dev_pm_qos_constraints_init(struct device *dev)
 +{
 +      mutex_lock(&dev_pm_qos_mtx);
 +      dev->power.constraints = NULL;
 +      dev->power.power_state = PMSG_ON;
 +      mutex_unlock(&dev_pm_qos_mtx);
 +}
 +
 +/**
 + * dev_pm_qos_constraints_destroy
 + * @dev: target device
 + *
 + * Called from the device PM subsystem at device removal under device_pm_lock().
 + */
 +void dev_pm_qos_constraints_destroy(struct device *dev)
 +{
 +      struct dev_pm_qos_request *req, *tmp;
 +      struct pm_qos_constraints *c;
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      dev->power.power_state = PMSG_INVALID;
 +      c = dev->power.constraints;
 +      if (!c)
 +              goto out;
 +
 +      /* Flush the constraints list for the device */
 +      plist_for_each_entry_safe(req, tmp, &c->list, node) {
 +              /*
 +               * Update constraints list and call the notification
 +               * callbacks if needed
 +               */
 +              apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 +              memset(req, 0, sizeof(*req));
 +      }
 +
 +      __dev_pm_qos_constraints_init(dev);
 +
 +      kfree(c->notifiers);
 +      kfree(c);
 +
 + out:
 +      mutex_unlock(&dev_pm_qos_mtx);
 +}
 +
 +/**
 + * dev_pm_qos_add_request - inserts new qos request into the list
 + * @dev: target device for the constraint
 + * @req: pointer to a preallocated handle
 + * @value: defines the qos request
 + *
 + * This function inserts a new entry in the device constraints list of
 + * requested qos performance characteristics. It recomputes the aggregate
 + * QoS expectations of parameters and initializes the dev_pm_qos_request
 + * handle.  Caller needs to save this handle for later use in updates and
 + * removal.
 + *
 + * Returns 1 if the aggregated constraint value has changed,
 + * 0 if the aggregated constraint value has not changed,
 + * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 + * to allocate for data structures, -ENODEV if the device has just been removed
 + * from the system.
 + */
 +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 +                         s32 value)
 +{
 +      int ret = 0;
 +
 +      if (!dev || !req) /*guard against callers passing in null */
 +              return -EINVAL;
 +
 +      if (dev_pm_qos_request_active(req)) {
 +              WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
 +                      "added request\n");
 +              return -EINVAL;
 +      }
 +
 +      req->dev = dev;
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      if (!dev->power.constraints) {
 +              if (dev->power.power_state.event == PM_EVENT_INVALID) {
 +                      /* The device has been removed from the system. */
 +                      req->dev = NULL;
 +                      ret = -ENODEV;
 +                      goto out;
 +              } else {
 +                      /*
 +                       * Allocate the constraints data on the first call to
 +                       * add_request, i.e. only if the data is not already
 +                       * allocated and if the device has not been removed.
 +                       */
 +                      ret = dev_pm_qos_constraints_allocate(dev);
 +              }
 +      }
 +
 +      if (!ret)
 +              ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 +
 + out:
 +      mutex_unlock(&dev_pm_qos_mtx);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
 +
 +/**
 + * dev_pm_qos_update_request - modifies an existing qos request
 + * @req : handle to list element holding a dev_pm_qos request to use
 + * @new_value: defines the qos request
 + *
 + * Updates an existing dev PM qos request along with updating the
 + * target value.
 + *
 + * Attempts are made to make this code callable on hot code paths.
 + *
 + * Returns 1 if the aggregated constraint value has changed,
 + * 0 if the aggregated constraint value has not changed,
 + * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 + * removed from the system
 + */
 +int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 +                            s32 new_value)
 +{
 +      int ret = 0;
 +
 +      if (!req) /*guard against callers passing in null */
 +              return -EINVAL;
 +
 +      if (!dev_pm_qos_request_active(req)) {
 +              WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
 +                      "unknown object\n");
 +              return -EINVAL;
 +      }
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      if (req->dev->power.constraints) {
 +              if (new_value != req->node.prio)
 +                      ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
 +                                             new_value);
 +      } else {
 +              /* Return if the device has been removed */
 +              ret = -ENODEV;
 +      }
 +
 +      mutex_unlock(&dev_pm_qos_mtx);
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 +
 +/**
 + * dev_pm_qos_remove_request - modifies an existing qos request
 + * @req: handle to request list element
 + *
 + * Will remove pm qos request from the list of constraints and
 + * recompute the current target value. Call this on slow code paths.
 + *
 + * Returns 1 if the aggregated constraint value has changed,
 + * 0 if the aggregated constraint value has not changed,
 + * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 + * removed from the system
 + */
 +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 +{
 +      int ret = 0;
 +
 +      if (!req) /*guard against callers passing in null */
 +              return -EINVAL;
 +
 +      if (!dev_pm_qos_request_active(req)) {
 +              WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
 +                      "unknown object\n");
 +              return -EINVAL;
 +      }
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      if (req->dev->power.constraints) {
 +              ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
 +                                     PM_QOS_DEFAULT_VALUE);
 +              memset(req, 0, sizeof(*req));
 +      } else {
 +              /* Return if the device has been removed */
 +              ret = -ENODEV;
 +      }
 +
 +      mutex_unlock(&dev_pm_qos_mtx);
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
 +
 +/**
 + * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 + * of per-device PM QoS constraints
 + *
 + * @dev: target device for the constraint
 + * @notifier: notifier block managed by caller.
 + *
 + * Will register the notifier into a notification chain that gets called
 + * upon changes to the target value for the device.
 + */
 +int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 +{
 +      int retval = 0;
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      /* Silently return if the constraints object is not present. */
 +      if (dev->power.constraints)
 +              retval = blocking_notifier_chain_register(
 +                              dev->power.constraints->notifiers,
 +                              notifier);
 +
 +      mutex_unlock(&dev_pm_qos_mtx);
 +      return retval;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
 +
 +/**
 + * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 + * of per-device PM QoS constraints
 + *
 + * @dev: target device for the constraint
 + * @notifier: notifier block to be removed.
 + *
 + * Will remove the notifier from the notification chain that gets called
 + * upon changes to the target value.
 + */
 +int dev_pm_qos_remove_notifier(struct device *dev,
 +                             struct notifier_block *notifier)
 +{
 +      int retval = 0;
 +
 +      mutex_lock(&dev_pm_qos_mtx);
 +
 +      /* Silently return if the constraints object is not present. */
 +      if (dev->power.constraints)
 +              retval = blocking_notifier_chain_unregister(
 +                              dev->power.constraints->notifiers,
 +                              notifier);
 +
 +      mutex_unlock(&dev_pm_qos_mtx);
 +      return retval;
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 +
 +/**
 + * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 + * target value of the PM QoS constraints for any device
 + *
 + * @notifier: notifier block managed by caller.
 + *
 + * Will register the notifier into a notification chain that gets called
 + * upon changes to the target value for any device.
 + */
 +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
 +{
 +      return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
 +
 +/**
 + * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 + * target value of PM QoS constraints for any device
 + *
 + * @notifier: notifier block to be removed.
 + *
 + * Will remove the notifier from the notification chain that gets called
 + * upon changes to the target value for any device.
 + */
 +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
 +{
 +      return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
 +}
 +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
index 7a6fb5e34a0e4bbaf527eae45d1f0484ebdbc24a,5b084de0fbe90a5315ae0351aab04f8e61e0c4bc..56a43c0deab8955c90aadf80831cc2548e5da4d9
@@@ -8,8 -8,8 +8,9 @@@
   */
  
  #include <linux/sched.h>
+ #include <linux/export.h>
  #include <linux/pm_runtime.h>
 +#include <trace/events/rpm.h>
  #include "power.h"
  
  static int rpm_resume(struct device *dev, int rpmflags);
Simple merge
index e2b172b93dba172f15e225c945021916b7183bfb,0000000000000000000000000000000000000000..d99c4540b9bcca1baa3ed72d69bcf139e7903687
mode 100644,000000..100644
--- /dev/null
@@@ -1,333 -1,0 +1,334 @@@
 +/*
 + * Register cache access API
 + *
 + * Copyright 2011 Wolfson Microelectronics plc
 + *
 + * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + */
 +
 +#include <linux/slab.h>
++#include <linux/export.h>
 +#include <trace/events/regmap.h>
 +
 +#include "internal.h"
 +
 +static const struct regcache_ops *cache_types[] = {
 +      &regcache_indexed_ops,
 +      &regcache_rbtree_ops,
 +      &regcache_lzo_ops,
 +};
 +
 +static int regcache_hw_init(struct regmap *map)
 +{
 +      int i, j;
 +      int ret;
 +      int count;
 +      unsigned int val;
 +      void *tmp_buf;
 +
 +      if (!map->num_reg_defaults_raw)
 +              return -EINVAL;
 +
 +      if (!map->reg_defaults_raw) {
 +              dev_warn(map->dev, "No cache defaults, reading back from HW\n");
 +              tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
 +              if (!tmp_buf)
 +                      return -EINVAL;
 +              ret = regmap_bulk_read(map, 0, tmp_buf,
 +                                     map->num_reg_defaults_raw);
 +              if (ret < 0) {
 +                      kfree(tmp_buf);
 +                      return ret;
 +              }
 +              map->reg_defaults_raw = tmp_buf;
 +              map->cache_free = 1;
 +      }
 +
 +      /* calculate the size of reg_defaults */
 +      for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
 +              val = regcache_get_val(map->reg_defaults_raw,
 +                                     i, map->cache_word_size);
 +              if (!val)
 +                      continue;
 +              count++;
 +      }
 +
 +      map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
 +                                    GFP_KERNEL);
 +      if (!map->reg_defaults)
 +              return -ENOMEM;
 +
 +      /* fill the reg_defaults */
 +      map->num_reg_defaults = count;
 +      for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
 +              val = regcache_get_val(map->reg_defaults_raw,
 +                                     i, map->cache_word_size);
 +              if (!val)
 +                      continue;
 +              map->reg_defaults[j].reg = i;
 +              map->reg_defaults[j].def = val;
 +              j++;
 +      }
 +
 +      return 0;
 +}
 +
 +int regcache_init(struct regmap *map)
 +{
 +      int ret;
 +      int i;
 +      void *tmp_buf;
 +
 +      if (map->cache_type == REGCACHE_NONE) {
 +              map->cache_bypass = true;
 +              return 0;
 +      }
 +
 +      for (i = 0; i < ARRAY_SIZE(cache_types); i++)
 +              if (cache_types[i]->type == map->cache_type)
 +                      break;
 +
 +      if (i == ARRAY_SIZE(cache_types)) {
 +              dev_err(map->dev, "Could not match compress type: %d\n",
 +                      map->cache_type);
 +              return -EINVAL;
 +      }
 +
 +      map->cache = NULL;
 +      map->cache_ops = cache_types[i];
 +
 +      if (!map->cache_ops->read ||
 +          !map->cache_ops->write ||
 +          !map->cache_ops->name)
 +              return -EINVAL;
 +
 +      /* We still need to ensure that the reg_defaults
 +       * won't vanish from under us.  We'll need to make
 +       * a copy of it.
 +       */
 +      if (map->reg_defaults) {
 +              if (!map->num_reg_defaults)
 +                      return -EINVAL;
 +              tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
 +                                sizeof(struct reg_default), GFP_KERNEL);
 +              if (!tmp_buf)
 +                      return -ENOMEM;
 +              map->reg_defaults = tmp_buf;
 +      } else {
 +              /* Some devices such as PMIC's don't have cache defaults,
 +               * we cope with this by reading back the HW registers and
 +               * crafting the cache defaults by hand.
 +               */
 +              ret = regcache_hw_init(map);
 +              if (ret < 0)
 +                      return ret;
 +      }
 +
 +      if (!map->max_register)
 +              map->max_register = map->num_reg_defaults_raw;
 +
 +      if (map->cache_ops->init) {
 +              dev_dbg(map->dev, "Initializing %s cache\n",
 +                      map->cache_ops->name);
 +              return map->cache_ops->init(map);
 +      }
 +      return 0;
 +}
 +
 +void regcache_exit(struct regmap *map)
 +{
 +      if (map->cache_type == REGCACHE_NONE)
 +              return;
 +
 +      BUG_ON(!map->cache_ops);
 +
 +      kfree(map->reg_defaults);
 +      if (map->cache_free)
 +              kfree(map->reg_defaults_raw);
 +
 +      if (map->cache_ops->exit) {
 +              dev_dbg(map->dev, "Destroying %s cache\n",
 +                      map->cache_ops->name);
 +              map->cache_ops->exit(map);
 +      }
 +}
 +
 +/**
 + * regcache_read: Fetch the value of a given register from the cache.
 + *
 + * @map: map to configure.
 + * @reg: The register index.
 + * @value: The value to be returned.
 + *
 + * Return a negative value on failure, 0 on success.
 + */
 +int regcache_read(struct regmap *map,
 +                unsigned int reg, unsigned int *value)
 +{
 +      if (map->cache_type == REGCACHE_NONE)
 +              return -ENOSYS;
 +
 +      BUG_ON(!map->cache_ops);
 +
 +      if (!regmap_readable(map, reg))
 +              return -EIO;
 +
 +      if (!regmap_volatile(map, reg))
 +              return map->cache_ops->read(map, reg, value);
 +
 +      return -EINVAL;
 +}
 +EXPORT_SYMBOL_GPL(regcache_read);
 +
 +/**
 + * regcache_write: Set the value of a given register in the cache.
 + *
 + * @map: map to configure.
 + * @reg: The register index.
 + * @value: The new register value.
 + *
 + * Return a negative value on failure, 0 on success.
 + */
 +int regcache_write(struct regmap *map,
 +                 unsigned int reg, unsigned int value)
 +{
 +      if (map->cache_type == REGCACHE_NONE)
 +              return 0;
 +
 +      BUG_ON(!map->cache_ops);
 +
 +      if (!regmap_writeable(map, reg))
 +              return -EIO;
 +
 +      if (!regmap_volatile(map, reg))
 +              return map->cache_ops->write(map, reg, value);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(regcache_write);
 +
 +/**
 + * regcache_sync: Sync the register cache with the hardware.
 + *
 + * @map: map to configure.
 + *
 + * Any registers that should not be synced should be marked as
 + * volatile.  In general drivers can choose not to use the provided
 + * syncing functionality if they so require.
 + *
 + * Return a negative value on failure, 0 on success.
 + */
 +int regcache_sync(struct regmap *map)
 +{
 +      int ret;
 +      const char *name;
 +
 +      BUG_ON(!map->cache_ops);
 +
 +      if (map->cache_ops->sync) {
 +              dev_dbg(map->dev, "Syncing %s cache\n",
 +                      map->cache_ops->name);
 +              name = map->cache_ops->name;
 +              trace_regcache_sync(map->dev, name, "start");
 +              ret = map->cache_ops->sync(map);
 +              trace_regcache_sync(map->dev, name, "stop");
 +      }
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(regcache_sync);
 +
 +/**
 + * regcache_cache_only: Put a register map into cache only mode
 + *
 + * @map: map to configure
 + * @cache_only: flag if changes should be written to the hardware
 + *
 + * When a register map is marked as cache only writes to the register
 + * map API will only update the register cache, they will not cause
 + * any hardware changes.  This is useful for allowing portions of
 + * drivers to act as though the device were functioning as normal when
 + * it is disabled for power saving reasons.
 + */
 +void regcache_cache_only(struct regmap *map, bool enable)
 +{
 +      map->cache_only = enable;
 +}
 +EXPORT_SYMBOL_GPL(regcache_cache_only);
 +
 +bool regcache_set_val(void *base, unsigned int idx,
 +                    unsigned int val, unsigned int word_size)
 +{
 +      switch (word_size) {
 +      case 1: {
 +              u8 *cache = base;
 +              if (cache[idx] == val)
 +                      return true;
 +              cache[idx] = val;
 +              break;
 +      }
 +      case 2: {
 +              u16 *cache = base;
 +              if (cache[idx] == val)
 +                      return true;
 +              cache[idx] = val;
 +              break;
 +      }
 +      default:
 +              BUG();
 +      }
 +      /* unreachable */
 +      return false;
 +}
 +
 +unsigned int regcache_get_val(const void *base, unsigned int idx,
 +                            unsigned int word_size)
 +{
 +      if (!base)
 +              return -EINVAL;
 +
 +      switch (word_size) {
 +      case 1: {
 +              const u8 *cache = base;
 +              return cache[idx];
 +      }
 +      case 2: {
 +              const u16 *cache = base;
 +              return cache[idx];
 +      }
 +      default:
 +              BUG();
 +      }
 +      /* unreachable */
 +      return -1;
 +}
 +
 +int regcache_lookup_reg(struct regmap *map, unsigned int reg)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < map->num_reg_defaults; i++)
 +              if (map->reg_defaults[i].reg == reg)
 +                      return i;
 +      return -1;
 +}
 +
 +int regcache_insert_reg(struct regmap *map, unsigned int reg,
 +                      unsigned int val)
 +{
 +      void *tmp;
 +
 +      tmp = krealloc(map->reg_defaults,
 +                     (map->num_reg_defaults + 1) * sizeof(struct reg_default),
 +                     GFP_KERNEL);
 +      if (!tmp)
 +              return -ENOMEM;
 +      map->reg_defaults = tmp;
 +      map->num_reg_defaults++;
 +      map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
 +      map->reg_defaults[map->num_reg_defaults - 1].def = val;
 +      return 0;
 +}
Simple merge
Simple merge
index 2968d809d49f8357cae15ef644cdd5a81cbe0a5a,fcc63db0ce7542ded6428071103315370dda11cb..800163c8c2e7a046a4aa1219e6d7c876be0be19b
@@@ -9,49 -9,22 +9,50 @@@
   */
  
  #include "bcma_private.h"
++#include <linux/export.h>
  #include <linux/bcma/bcma.h>
  
 -static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
 -                                      u32 offset, u32 mask, u32 set)
 +static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
  {
 -      u32 value;
 +      bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
 +      bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
 +      return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
 +}
  
 -      bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
 +void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
 +{
 +      bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
 +      bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
 +      bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
 +}
 +EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
 +
 +void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
 +                           u32 set)
 +{
 +      bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
 +      bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
 +      bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
 +}
 +EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
 +
 +void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
 +                               u32 offset, u32 mask, u32 set)
 +{
        bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
        bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
 -      value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
 -      value &= mask;
 -      value |= set;
 -      bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
 -      bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
 +      bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
 +}
 +EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
 +
 +void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
 +                              u32 set)
 +{
 +      bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
 +      bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
 +      bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
  }
 +EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
  
  static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
  {
Simple merge
Simple merge
Simple merge
index ca48406555a756d0b892dcaaf3002801b85337ed,0000000000000000000000000000000000000000..1768d9f782f0d6c6e4f7c95a147225b34822e8d1
mode 100644,000000..100644
--- /dev/null
@@@ -1,3567 -1,0 +1,3568 @@@
 +/*
 + * Driver for the Micron P320 SSD
 + *   Copyright (C) 2011 Micron Technology, Inc.
 + *
 + * Portions of this code were derived from works subjected to the
 + * following copyright:
 + *    Copyright (C) 2009 Integrated Device Technology, Inc.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + */
 +
 +#include <linux/pci.h>
 +#include <linux/interrupt.h>
 +#include <linux/ata.h>
 +#include <linux/delay.h>
 +#include <linux/hdreg.h>
 +#include <linux/uaccess.h>
 +#include <linux/random.h>
 +#include <linux/smp.h>
 +#include <linux/compat.h>
 +#include <linux/fs.h>
 +#include <linux/module.h>
 +#include <linux/genhd.h>
 +#include <linux/blkdev.h>
 +#include <linux/bio.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/idr.h>
++#include <linux/module.h>
 +#include <../drivers/ata/ahci.h>
 +#include "mtip32xx.h"
 +
 +#define HW_CMD_SLOT_SZ                (MTIP_MAX_COMMAND_SLOTS * 32)
 +#define HW_CMD_TBL_SZ         (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
 +#define HW_CMD_TBL_AR_SZ      (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
 +#define HW_PORT_PRIV_DMA_SZ \
 +              (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
 +
 +#define HOST_HSORG            0xFC
 +#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
 +#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
 +#define HSORG_HWREV           0xFF00
 +#define HSORG_STYLE           0x8
 +#define HSORG_SLOTGROUPS      0x7
 +
 +#define PORT_COMMAND_ISSUE    0x38
 +#define PORT_SDBV             0x7C
 +
 +#define PORT_OFFSET           0x100
 +#define PORT_MEM_SIZE         0x80
 +
 +#define PORT_IRQ_ERR \
 +      (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
 +       PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
 +       PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
 +       PORT_IRQ_OVERFLOW)
 +#define PORT_IRQ_LEGACY \
 +      (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
 +#define PORT_IRQ_HANDLED \
 +      (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
 +       PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
 +       PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
 +#define DEF_PORT_IRQ \
 +      (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
 +
 +/* product numbers */
 +#define MTIP_PRODUCT_UNKNOWN  0x00
 +#define MTIP_PRODUCT_ASICFPGA 0x11
 +
 +/* Device instance number, incremented each time a device is probed. */
 +static int instance;
 +
 +/*
 + * Global variable used to hold the major block device number
 + * allocated in mtip_init().
 + */
 +static int mtip_major;
 +
 +static DEFINE_SPINLOCK(rssd_index_lock);
 +static DEFINE_IDA(rssd_index_ida);
 +
 +#ifdef CONFIG_COMPAT
 +struct mtip_compat_ide_task_request_s {
 +      __u8            io_ports[8];
 +      __u8            hob_ports[8];
 +      ide_reg_valid_t out_flags;
 +      ide_reg_valid_t in_flags;
 +      int             data_phase;
 +      int             req_cmd;
 +      compat_ulong_t  out_size;
 +      compat_ulong_t  in_size;
 +};
 +#endif
 +
 +static int mtip_exec_internal_command(struct mtip_port *port,
 +                              void *fis,
 +                              int fisLen,
 +                              dma_addr_t buffer,
 +                              int bufLen,
 +                              u32 opts,
 +                              gfp_t atomic,
 +                              unsigned long timeout);
 +
 +/*
 + * This function check_for_surprise_removal is called
 + * while card is removed from the system and it will
 + * read the vendor id from the configration space
 + *
 + * @pdev Pointer to the pci_dev structure.
 + *
 + * return value
 + *     true if device removed, else false
 + */
 +static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 +{
 +      u16 vendor_id = 0;
 +
 +       /* Read the vendorID from the configuration space */
 +      pci_read_config_word(pdev, 0x00, &vendor_id);
 +      if (vendor_id == 0xFFFF)
 +              return true; /* device removed */
 +
 +      return false; /* device present */
 +}
 +
 +/*
 + * This function is called for clean the pending command in the
 + * command slot during the surprise removal of device and return
 + * error to the upper layer.
 + *
 + * @dd Pointer to the DRIVER_DATA structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_command_cleanup(struct driver_data *dd)
 +{
 +      int group = 0, commandslot = 0, commandindex = 0;
 +      struct mtip_cmd *command;
 +      struct mtip_port *port = dd->port;
 +
 +      for (group = 0; group < 4; group++) {
 +              for (commandslot = 0; commandslot < 32; commandslot++) {
 +                      if (!(port->allocated[group] & (1 << commandslot)))
 +                              continue;
 +
 +                      commandindex = group << 5 | commandslot;
 +                      command = &port->commands[commandindex];
 +
 +                      if (atomic_read(&command->active)
 +                          && (command->async_callback)) {
 +                              command->async_callback(command->async_data,
 +                                      -ENODEV);
 +                              command->async_callback = NULL;
 +                              command->async_data = NULL;
 +                      }
 +
 +                      dma_unmap_sg(&port->dd->pdev->dev,
 +                              command->sg,
 +                              command->scatter_ents,
 +                              command->direction);
 +              }
 +      }
 +
 +      up(&port->cmd_slot);
 +
 +      atomic_set(&dd->drv_cleanup_done, true);
 +}
 +
 +/*
 + * Obtain an empty command slot.
 + *
 + * This function needs to be reentrant since it could be called
 + * at the same time on multiple CPUs. The allocation of the
 + * command slot must be atomic.
 + *
 + * @port Pointer to the port data structure.
 + *
 + * return value
 + *    >= 0    Index of command slot obtained.
 + *    -1      No command slots available.
 + */
 +static int get_slot(struct mtip_port *port)
 +{
 +      int slot, i;
 +      unsigned int num_command_slots = port->dd->slot_groups * 32;
 +
 +      /*
 +       * Try 10 times, because there is a small race here.
 +       *  that's ok, because it's still cheaper than a lock.
 +       *
 +       * Race: Since this section is not protected by lock, same bit
 +       * could be chosen by different process contexts running in
 +       * different processor. So instead of costly lock, we are going
 +       * with loop.
 +       */
 +      for (i = 0; i < 10; i++) {
 +              slot = find_next_zero_bit(port->allocated,
 +                                       num_command_slots, 1);
 +              if ((slot < num_command_slots) &&
 +                  (!test_and_set_bit(slot, port->allocated)))
 +                      return slot;
 +      }
 +      dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
 +
 +      if (mtip_check_surprise_removal(port->dd->pdev)) {
 +              /* Device not present, clean outstanding commands */
 +              mtip_command_cleanup(port->dd);
 +      }
 +      return -1;
 +}
 +
 +/*
 + * Release a command slot.
 + *
 + * @port Pointer to the port data structure.
 + * @tag  Tag of command to release
 + *
 + * return value
 + *    None
 + */
 +static inline void release_slot(struct mtip_port *port, int tag)
 +{
 +      smp_mb__before_clear_bit();
 +      clear_bit(tag, port->allocated);
 +      smp_mb__after_clear_bit();
 +}
 +
 +/*
 + * Reset the HBA (without sleeping)
 + *
 + * Just like hba_reset, except does not call sleep, so can be
 + * run from interrupt/tasklet context.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0       The reset was successful.
 + *    -1      The HBA Reset bit did not clear.
 + */
 +static int hba_reset_nosleep(struct driver_data *dd)
 +{
 +      unsigned long timeout;
 +
 +      /* Chip quirk: quiesce any chip function */
 +      mdelay(10);
 +
 +      /* Set the reset bit */
 +      writel(HOST_RESET, dd->mmio + HOST_CTL);
 +
 +      /* Flush */
 +      readl(dd->mmio + HOST_CTL);
 +
 +      /*
 +       * Wait 10ms then spin for up to 1 second
 +       * waiting for reset acknowledgement
 +       */
 +      timeout = jiffies + msecs_to_jiffies(1000);
 +      mdelay(10);
 +      while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
 +               && time_before(jiffies, timeout))
 +              mdelay(1);
 +
 +      if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
 +              return -1;
 +
 +      return 0;
 +}
 +
 +/*
 + * Issue a command to the hardware.
 + *
 + * Set the appropriate bit in the s_active and Command Issue hardware
 + * registers, causing hardware command processing to begin.
 + *
 + * @port Pointer to the port structure.
 + * @tag  The tag of the command to be issued.
 + *
 + * return value
 + *      None
 + */
 +static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 +{
 +      unsigned long flags = 0;
 +
 +      atomic_set(&port->commands[tag].active, 1);
 +
 +      spin_lock_irqsave(&port->cmd_issue_lock, flags);
 +
 +      writel((1 << MTIP_TAG_BIT(tag)),
 +                      port->s_active[MTIP_TAG_INDEX(tag)]);
 +      writel((1 << MTIP_TAG_BIT(tag)),
 +                      port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 +
 +      spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
 +}
 +
 +/*
 + * Enable/disable the reception of FIS
 + *
 + * @port   Pointer to the port data structure
 + * @enable 1 to enable, 0 to disable
 + *
 + * return value
 + *    Previous state: 1 enabled, 0 disabled
 + */
 +static int mtip_enable_fis(struct mtip_port *port, int enable)
 +{
 +      u32 tmp;
 +
 +      /* enable FIS reception */
 +      tmp = readl(port->mmio + PORT_CMD);
 +      if (enable)
 +              writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
 +      else
 +              writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
 +
 +      /* Flush */
 +      readl(port->mmio + PORT_CMD);
 +
 +      return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
 +}
 +
 +/*
 + * Enable/disable the DMA engine
 + *
 + * @port   Pointer to the port data structure
 + * @enable 1 to enable, 0 to disable
 + *
 + * return value
 + *    Previous state: 1 enabled, 0 disabled.
 + */
 +static int mtip_enable_engine(struct mtip_port *port, int enable)
 +{
 +      u32 tmp;
 +
 +      /* enable FIS reception */
 +      tmp = readl(port->mmio + PORT_CMD);
 +      if (enable)
 +              writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
 +      else
 +              writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
 +
 +      readl(port->mmio + PORT_CMD);
 +      return (((tmp & PORT_CMD_START) == PORT_CMD_START));
 +}
 +
 +/*
 + * Enables the port DMA engine and FIS reception.
 + *
 + * return value
 + *    None
 + */
 +static inline void mtip_start_port(struct mtip_port *port)
 +{
 +      /* Enable FIS reception */
 +      mtip_enable_fis(port, 1);
 +
 +      /* Enable the DMA engine */
 +      mtip_enable_engine(port, 1);
 +}
 +
 +/*
 + * Deinitialize a port by disabling port interrupts, the DMA engine,
 + * and FIS reception.
 + *
 + * @port Pointer to the port structure
 + *
 + * return value
 + *    None
 + */
 +static inline void mtip_deinit_port(struct mtip_port *port)
 +{
 +      /* Disable interrupts on this port */
 +      writel(0, port->mmio + PORT_IRQ_MASK);
 +
 +      /* Disable the DMA engine */
 +      mtip_enable_engine(port, 0);
 +
 +      /* Disable FIS reception */
 +      mtip_enable_fis(port, 0);
 +}
 +
 +/*
 + * Initialize a port.
 + *
 + * This function deinitializes the port by calling mtip_deinit_port() and
 + * then initializes it by setting the command header and RX FIS addresses,
 + * clearing the SError register and any pending port interrupts before
 + * re-enabling the default set of port interrupts.
 + *
 + * @port Pointer to the port structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_init_port(struct mtip_port *port)
 +{
 +      int i;
 +      mtip_deinit_port(port);
 +
 +      /* Program the command list base and FIS base addresses */
 +      if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
 +              /* Upper 32 bits are only programmed on 64-bit capable HBAs */
 +              writel((port->command_list_dma >> 16) >> 16,
 +                       port->mmio + PORT_LST_ADDR_HI);
 +              writel((port->rxfis_dma >> 16) >> 16,
 +                       port->mmio + PORT_FIS_ADDR_HI);
 +      }
 +
 +      writel(port->command_list_dma & 0xffffffff,
 +                      port->mmio + PORT_LST_ADDR);
 +      writel(port->rxfis_dma & 0xffffffff, port->mmio + PORT_FIS_ADDR);
 +
 +      /* Clear SError (write-1-to-clear register) */
 +      writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
 +
 +      /* reset the completed registers.*/
 +      for (i = 0; i < port->dd->slot_groups; i++)
 +              writel(0xFFFFFFFF, port->completed[i]);
 +
 +      /* Clear any pending interrupts for this port */
 +      writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
 +
 +      /* Enable port interrupts */
 +      writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
 +}
 +
 +/*
 + * Restart a port
 + *
 + * Stops the DMA engine, waits for it to go idle, issues a COMRESET via
 + * PxSCTL.DET, waits for the device to come back, and re-enables the
 + * DMA engine. Escalates to an HBA reset if the engine will not stop.
 + *
 + * @port Pointer to the port data structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_restart_port(struct mtip_port *port)
 +{
 +      unsigned long timeout;
 +
 +      /* Disable the DMA engine */
 +      mtip_enable_engine(port, 0);
 +
 +      /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
 +      /* NOTE(review): tight busy-wait with no cpu_relax() in the body */
 +      timeout = jiffies + msecs_to_jiffies(500);
 +      while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
 +               && time_before(jiffies, timeout))
 +              ;
 +
 +      /*
 +       * Chip quirk: escalate to hba reset if
 +       * PxCMD.CR not clear after 500 ms
 +       */
 +      if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
 +              dev_warn(&port->dd->pdev->dev,
 +                      "PxCMD.CR not clear, escalating reset\n");
 +
 +              if (hba_reset_nosleep(port->dd))
 +                      dev_err(&port->dd->pdev->dev,
 +                              "HBA reset escalation failed.\n");
 +
 +              /* 30 ms delay before com reset to quiesce chip */
 +              mdelay(30);
 +      }
 +
 +      dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
 +
 +      /* Set PxSCTL.DET (bit 0) to begin the COMRESET sequence */
 +      writel(readl(port->mmio + PORT_SCR_CTL) |
 +                       1, port->mmio + PORT_SCR_CTL);
 +      readl(port->mmio + PORT_SCR_CTL);
 +
 +      /* Wait 1 ms to quiesce chip function */
 +      timeout = jiffies + msecs_to_jiffies(1);
 +      while (time_before(jiffies, timeout))
 +              ;
 +
 +      /* Clear PxSCTL.DET to end the COMRESET */
 +      writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
 +                       port->mmio + PORT_SCR_CTL);
 +      readl(port->mmio + PORT_SCR_CTL);
 +
 +      /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
 +      timeout = jiffies + msecs_to_jiffies(500);
 +      while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
 +                       && time_before(jiffies, timeout))
 +              ;
 +
 +      if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
 +              dev_warn(&port->dd->pdev->dev,
 +                      "COM reset failed\n");
 +
 +      /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
 +      writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
 +
 +      /* Enable the DMA engine */
 +      mtip_enable_engine(port, 1);
 +}
 +
 +/*
 + * Called periodically to see if any read/write commands are
 + * taking too long to complete.
 + *
 + * @data Pointer to the PORT data structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_timeout_function(unsigned long int data)
 +{
 +      struct mtip_port *port = (struct mtip_port *) data;
 +      struct host_to_dev_fis *fis;
 +      struct mtip_cmd *command;
 +      int tag, cmdto_cnt = 0;
 +      unsigned int bit, group;
 +      unsigned int num_command_slots = port->dd->slot_groups * 32;
 +
 +      if (unlikely(!port))
 +              return;
 +
 +      if (atomic_read(&port->dd->resumeflag) == true) {
 +              mod_timer(&port->cmd_timer,
 +                      jiffies + msecs_to_jiffies(30000));
 +              return;
 +      }
 +
 +      for (tag = 0; tag < num_command_slots; tag++) {
 +              /*
 +               * Skip internal command slot as it has
 +               * its own timeout mechanism
 +               */
 +              if (tag == MTIP_TAG_INTERNAL)
 +                      continue;
 +
 +              if (atomic_read(&port->commands[tag].active) &&
 +                 (time_after(jiffies, port->commands[tag].comp_time))) {
 +                      group = tag >> 5;
 +                      bit = tag & 0x1f;
 +
 +                      command = &port->commands[tag];
 +                      fis = (struct host_to_dev_fis *) command->command;
 +
 +                      dev_warn(&port->dd->pdev->dev,
 +                              "Timeout for command tag %d\n", tag);
 +
 +                      cmdto_cnt++;
 +                      if (cmdto_cnt == 1)
 +                              atomic_inc(&port->dd->eh_active);
 +
 +                      /*
 +                       * Clear the completed bit. This should prevent
 +                       *  any interrupt handlers from trying to retire
 +                       *  the command.
 +                       */
 +                      writel(1 << bit, port->completed[group]);
 +
 +                      /* Call the async completion callback. */
 +                      if (likely(command->async_callback))
 +                              command->async_callback(command->async_data,
 +                                                       -EIO);
 +                      command->async_callback = NULL;
 +                      command->comp_func = NULL;
 +
 +                      /* Unmap the DMA scatter list entries */
 +                      dma_unmap_sg(&port->dd->pdev->dev,
 +                                      command->sg,
 +                                      command->scatter_ents,
 +                                      command->direction);
 +
 +                      /*
 +                       * Clear the allocated bit and active tag for the
 +                       * command.
 +                       */
 +                      atomic_set(&port->commands[tag].active, 0);
 +                      release_slot(port, tag);
 +
 +                      up(&port->cmd_slot);
 +              }
 +      }
 +
 +      if (cmdto_cnt) {
 +              dev_warn(&port->dd->pdev->dev,
 +                      "%d commands timed out: restarting port",
 +                      cmdto_cnt);
 +              mtip_restart_port(port);
 +              atomic_dec(&port->dd->eh_active);
 +      }
 +
 +      /* Restart the timer */
 +      mod_timer(&port->cmd_timer,
 +              jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 +}
 +
 +/*
 + * IO completion function.
 + *
 + * This completion function is called by the driver ISR when a
 + * command that was issued by the kernel completes. It first calls the
 + * asynchronous completion function which normally calls back into the block
 + * layer passing the asynchronous callback data, then unmaps the
 + * scatter list associated with the completed command, and finally
 + * clears the allocated bit associated with the completed command.
 + *
 + * @port   Pointer to the port data structure.
 + * @tag    Tag of the command.
 + * @data   Pointer to driver_data.
 + * @status Completion status.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_async_complete(struct mtip_port *port,
 +                              int tag,
 +                              void *data,
 +                              int status)
 +{
 +      struct mtip_cmd *command;
 +      struct driver_data *dd = data;
 +      /* Any nonzero hardware status is reported upward as -EIO */
 +      int cb_status = status ? -EIO : 0;
 +
 +      if (unlikely(!dd) || unlikely(!port))
 +              return;
 +
 +      command = &port->commands[tag];
 +
 +      if (unlikely(status == PORT_IRQ_TF_ERR)) {
 +              dev_warn(&port->dd->pdev->dev,
 +                      "Command tag %d failed due to TFE\n", tag);
 +      }
 +
 +      /* Upper layer callback */
 +      if (likely(command->async_callback))
 +              command->async_callback(command->async_data, cb_status);
 +
 +      /* Prevent the command from being completed a second time */
 +      command->async_callback = NULL;
 +      command->comp_func = NULL;
 +
 +      /* Unmap the DMA scatter list entries */
 +      dma_unmap_sg(&dd->pdev->dev,
 +              command->sg,
 +              command->scatter_ents,
 +              command->direction);
 +
 +      /* Clear the allocated and active bits for the command */
 +      atomic_set(&port->commands[tag].active, 0);
 +      release_slot(port, tag);
 +
 +      up(&port->cmd_slot);
 +}
 +
 +/*
 + * Internal command completion callback function.
 + *
 + * This function is normally called by the driver ISR when an internal
 + * command completed. This function signals the command completion by
 + * calling complete().
 + *
 + * @port   Pointer to the port data structure.
 + * @tag    Tag of the command that has completed.
 + * @data   Pointer to a completion structure.
 + * @status Completion status.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_completion(struct mtip_port *port,
 +                          int tag,
 +                          void *data,
 +                          int status)
 +{
 +      struct mtip_cmd *command = &port->commands[tag];
 +      struct completion *waiting = data;
 +      if (unlikely(status == PORT_IRQ_TF_ERR))
 +              dev_warn(&port->dd->pdev->dev,
 +                      "Internal command %d completed with TFE\n", tag);
 +
 +      /* Clear the callbacks so the command cannot be completed twice */
 +      command->async_callback = NULL;
 +      command->comp_func = NULL;
 +
 +      /* Wake the waiter in mtip_exec_internal_command() */
 +      complete(waiting);
 +}
 +
 +/*
 + * Helper function for tag logging
 + */
 +static void print_tags(struct driver_data *dd,
 +                      char *msg,
 +                      unsigned long *tagbits)
 +{
 +      unsigned int tag, count = 0;
 +
 +      for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
 +              if (test_bit(tag, tagbits))
 +                      count++;
 +      }
 +      if (count)
 +              dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
 +}
 +
 +/*
 + * Handle an error.
 + *
 + * Retires every command that had already completed when the taskfile
 + * error occurred, restarts the port, then either re-issues (while
 + * retries remain) or retires each command still outstanding.
 + *
 + * @dd Pointer to the DRIVER_DATA structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_handle_tfe(struct driver_data *dd)
 +{
 +      int group, tag, bit, reissue;
 +      struct mtip_port *port;
 +      struct mtip_cmd  *command;
 +      u32 completed;
 +      struct host_to_dev_fis *fis;
 +      unsigned long tagaccum[SLOTBITS_IN_LONGS];
 +
 +      dev_warn(&dd->pdev->dev, "Taskfile error\n");
 +
 +      port = dd->port;
 +
 +      /* Stop the timer to prevent command timeouts. */
 +      del_timer(&port->cmd_timer);
 +
 +      /* Set eh_active */
 +      atomic_inc(&dd->eh_active);
 +
 +      /* Loop through all the groups */
 +      for (group = 0; group < dd->slot_groups; group++) {
 +              completed = readl(port->completed[group]);
 +
 +              /* clear completed status register in the hardware.*/
 +              writel(completed, port->completed[group]);
 +
 +              /* clear the tag accumulator */
 +              memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 +
 +              /* Process successfully completed commands */
 +              for (bit = 0; bit < 32 && completed; bit++) {
 +                      if (!(completed & (1<<bit)))
 +                              continue;
 +                      tag = (group << 5) + bit;
 +
 +                      /* Skip the internal command slot */
 +                      if (tag == MTIP_TAG_INTERNAL)
 +                              continue;
 +
 +                      command = &port->commands[tag];
 +                      if (likely(command->comp_func)) {
 +                              set_bit(tag, tagaccum);
 +                              atomic_set(&port->commands[tag].active, 0);
 +                              command->comp_func(port,
 +                                       tag,
 +                                       command->comp_data,
 +                                       0);
 +                      } else {
 +                              dev_err(&port->dd->pdev->dev,
 +                                      "Missing completion func for tag %d",
 +                                      tag);
 +                              if (mtip_check_surprise_removal(dd->pdev)) {
 +                                      mtip_command_cleanup(dd);
 +                                      /* don't proceed further */
 +                                      return;
 +                              }
 +                      }
 +              }
 +      }
 +      print_tags(dd, "TFE tags completed:", tagaccum);
 +
 +      /* Restart the port */
 +      mdelay(20);
 +      mtip_restart_port(port);
 +
 +      /* clear the tag accumulator */
 +      memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 +
 +      /* Loop through all the groups */
 +      for (group = 0; group < dd->slot_groups; group++) {
 +              for (bit = 0; bit < 32; bit++) {
 +                      reissue = 1;
 +                      tag = (group << 5) + bit;
 +
 +                      /* If the active bit is set re-issue the command */
 +                      if (atomic_read(&port->commands[tag].active) == 0)
 +                              continue;
 +
 +                      fis = (struct host_to_dev_fis *)
 +                              port->commands[tag].command;
 +
 +                      /* Internal and SET FEATURES commands are not reissued */
 +                      if (tag == MTIP_TAG_INTERNAL ||
 +                          fis->command == ATA_CMD_SET_FEATURES)
 +                              reissue = 0;
 +
 +                      /*
 +                       * First check if this command has
 +                       *  exceeded its retries.
 +                       */
 +                      if (reissue &&
 +                          (port->commands[tag].retries-- > 0)) {
 +
 +                              set_bit(tag, tagaccum);
 +
 +                              /* Update the timeout value. */
 +                              port->commands[tag].comp_time =
 +                                      jiffies + msecs_to_jiffies(
 +                                      MTIP_NCQ_COMMAND_TIMEOUT_MS);
 +                              /* Re-issue the command. */
 +                              mtip_issue_ncq_command(port, tag);
 +
 +                              continue;
 +                      }
 +
 +                      /* Retire a command that will not be reissued */
 +                      dev_warn(&port->dd->pdev->dev,
 +                              "retiring tag %d\n", tag);
 +                      atomic_set(&port->commands[tag].active, 0);
 +
 +                      if (port->commands[tag].comp_func)
 +                              port->commands[tag].comp_func(
 +                                      port,
 +                                      tag,
 +                                      port->commands[tag].comp_data,
 +                                      PORT_IRQ_TF_ERR);
 +                      else
 +                              dev_warn(&port->dd->pdev->dev,
 +                                      "Bad completion for tag %d\n",
 +                                      tag);
 +              }
 +      }
 +      print_tags(dd, "TFE tags reissued:", tagaccum);
 +
 +      /* Decrement eh_active */
 +      atomic_dec(&dd->eh_active);
 +
 +      /* Re-arm the command timeout timer stopped above */
 +      mod_timer(&port->cmd_timer,
 +               jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 +}
 +
 +/*
 + * Handle a set device bits interrupt
 + *
 + * Walks the completed-status register of every slot group and retires
 + * each completed command through its completion callback.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    None
 + */
 +static inline void mtip_process_sdbf(struct driver_data *dd)
 +{
 +      struct mtip_port  *port = dd->port;
 +      int group, tag, bit;
 +      u32 completed;
 +      struct mtip_cmd *command;
 +
 +      /* walk all bits in all slot groups */
 +      for (group = 0; group < dd->slot_groups; group++) {
 +              completed = readl(port->completed[group]);
 +
 +              /* clear completed status register in the hardware.*/
 +              writel(completed, port->completed[group]);
 +
 +              /* Process completed commands. */
 +              for (bit = 0;
 +                   (bit < 32) && completed;
 +                   bit++, completed >>= 1) {
 +                      if (completed & 0x01) {
 +                              tag = (group << 5) | bit;
 +
 +                              /* skip internal command slot. */
 +                              if (unlikely(tag == MTIP_TAG_INTERNAL))
 +                                      continue;
 +
 +                              command = &port->commands[tag];
 +
 +                              /* make internal callback */
 +                              if (likely(command->comp_func)) {
 +                                      command->comp_func(
 +                                              port,
 +                                              tag,
 +                                              command->comp_data,
 +                                              0);
 +                              } else {
 +                                      dev_warn(&dd->pdev->dev,
 +                                              "Null completion "
 +                                              "for tag %d",
 +                                              tag);
 +
 +                                      if (mtip_check_surprise_removal(
 +                                              dd->pdev)) {
 +                                              mtip_command_cleanup(dd);
 +                                              return;
 +                                      }
 +                              }
 +                      }
 +              }
 +      }
 +}
 +
 +/*
 + * Process legacy pio and d2h interrupts
 + */
 +static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
 +{
 +      struct mtip_port *port = dd->port;
 +      struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
 +
 +      if (port->internal_cmd_in_progress &&
 +          cmd != NULL &&
 +          !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 +              & (1 << MTIP_TAG_INTERNAL))) {
 +              if (cmd->comp_func) {
 +                      cmd->comp_func(port,
 +                              MTIP_TAG_INTERNAL,
 +                              cmd->comp_data,
 +                              0);
 +                      return;
 +              }
 +      }
 +
 +      dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
 +
 +      return;
 +}
 +
 +/*
 + * Demux and handle errors
 + *
 + * @dd        Pointer to the driver data structure.
 + * @port_stat Port interrupt status error bits to be handled.
 + *
 + * return value
 + *    None
 + */
 +static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
 +{
 +      /* Taskfile or interface errors invoke full error handling */
 +      if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
 +              mtip_handle_tfe(dd);
 +
 +      if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
 +              dev_warn(&dd->pdev->dev,
 +                      "Clearing PxSERR.DIAG.x\n");
 +              writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
 +      }
 +
 +      if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
 +              dev_warn(&dd->pdev->dev,
 +                      "Clearing PxSERR.DIAG.n\n");
 +              writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
 +      }
 +
 +      /* Anything left over is reported but not acted upon */
 +      if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
 +              dev_warn(&dd->pdev->dev,
 +                      "Port stat errors %x unhandled\n",
 +                      (port_stat & ~PORT_IRQ_HANDLED));
 +      }
 +}
 +
 +static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
 +{
 +      struct driver_data *dd = (struct driver_data *) data;
 +      struct mtip_port *port = dd->port;
 +      u32 hba_stat, port_stat;
 +      int rv = IRQ_NONE;
 +
 +      hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
 +      if (hba_stat) {
 +              rv = IRQ_HANDLED;
 +
 +              /* Acknowledge the interrupt status on the port.*/
 +              port_stat = readl(port->mmio + PORT_IRQ_STAT);
 +              writel(port_stat, port->mmio + PORT_IRQ_STAT);
 +
 +              /* Demux port status */
 +              if (likely(port_stat & PORT_IRQ_SDB_FIS))
 +                      mtip_process_sdbf(dd);
 +
 +              if (unlikely(port_stat & PORT_IRQ_ERR)) {
 +                      if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
 +                              mtip_command_cleanup(dd);
 +                              /* don't proceed further */
 +                              return IRQ_HANDLED;
 +                      }
 +
 +                      mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
 +              }
 +
 +              if (unlikely(port_stat & PORT_IRQ_LEGACY))
 +                      mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
 +      }
 +
 +      /* acknowledge interrupt */
 +      writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
 +
 +      return rv;
 +}
 +
 +/*
 + * Wrapper for mtip_handle_irq
 + * (ignores return code)
 + */
 +static void mtip_tasklet(unsigned long data)
 +{
 +      mtip_handle_irq((struct driver_data *) data);
 +}
 +
 +/*
 + * HBA interrupt subroutine.
 + *
 + * @irq               IRQ number.
 + * @instance  Pointer to the driver data structure.
 + *
 + * return value
 + *    IRQ_HANDLED     A HBA interrupt was pending and handled.
 + *    IRQ_NONE        This interrupt was not for the HBA.
 + */
 +static irqreturn_t mtip_irq_handler(int irq, void *instance)
 +{
 +      struct driver_data *dd = instance;
 +      tasklet_schedule(&dd->tasklet);
 +      return IRQ_HANDLED;
 +}
 +
 +/*
 + * Issue a non-NCQ command to the hardware.
 + *
 + * Marks the command slot active and writes the tag's bit to the
 + * command issue register of the tag's slot group.
 + *
 + * @port Pointer to the port data structure.
 + * @tag  Tag of the command to issue.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
 +{
 +      atomic_set(&port->commands[tag].active, 1);
 +      writel(1 << MTIP_TAG_BIT(tag),
 +              port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 +}
 +
 +/*
 + * Wait for port to quiesce
 + *
 + * @port    Pointer to port data structure
 + * @timeout Max duration to wait (ms)
 + *
 + * return value
 + *    0       Success
 + *    -EBUSY  Commands still active
 + */
 +static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
 +{
 +      unsigned long to;
 +      unsigned int n, active;
 +
 +      to = jiffies + msecs_to_jiffies(timeout);
 +      do {
 +              /*
 +               * Ignore s_active bit 0 of array element 0.
 +               * This bit will always be set
 +               */
 +              active = readl(port->s_active[0]) & 0xfffffffe;
 +              for (n = 1; n < port->dd->slot_groups; n++)
 +                      active |= readl(port->s_active[n]);
 +
 +              if (!active)
 +                      break;
 +
 +              msleep(20);
 +      } while (time_before(jiffies, to));
 +
 +      return active ? -EBUSY : 0;
 +}
 +
 +/*
 + * Execute an internal command and wait for the completion.
 + *
 + * @port    Pointer to the port data structure.
 + * @fis     Pointer to the FIS that describes the command.
 + * @fisLen  Length in WORDS of the FIS.
 + * @buffer  DMA accessible for command data.
 + * @bufLen  Length, in bytes, of the data buffer.
 + * @opts    Command header options, excluding the FIS length
 + *             and the number of PRD entries.
 + * @atomic  GFP_KERNEL to sleep waiting for completion, otherwise poll.
 + * @timeout Time in ms to wait for the command to complete.
 + *
 + * return value
 + *    0        Command completed successfully.
 + *    -EFAULT  The buffer address is not correctly aligned.
 + *    -EBUSY   Internal command or other IO in progress.
 + *    -EAGAIN  Time out waiting for command to complete.
 + */
 +static int mtip_exec_internal_command(struct mtip_port *port,
 +                                      void *fis,
 +                                      int fisLen,
 +                                      dma_addr_t buffer,
 +                                      int bufLen,
 +                                      u32 opts,
 +                                      gfp_t atomic,
 +                                      unsigned long timeout)
 +{
 +      struct mtip_cmd_sg *command_sg;
 +      DECLARE_COMPLETION_ONSTACK(wait);
 +      int rv = 0;
 +      struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
 +
 +      /* Make sure the buffer is 8 byte aligned. This is asic specific. */
 +      if (buffer & 0x00000007) {
 +              dev_err(&port->dd->pdev->dev,
 +                      "SG buffer is not 8 byte aligned\n");
 +              return -EFAULT;
 +      }
 +
 +      /* Only one internal command should be running at a time */
 +      if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
 +              dev_warn(&port->dd->pdev->dev,
 +                      "Internal command already active\n");
 +              return -EBUSY;
 +      }
 +      port->internal_cmd_in_progress = 1;
 +
 +      if (atomic == GFP_KERNEL) {
 +              /* wait for io to complete if non atomic */
 +              if (mtip_quiesce_io(port, 5000) < 0) {
 +                      dev_warn(&port->dd->pdev->dev,
 +                              "Failed to quiesce IO\n");
 +                      release_slot(port, MTIP_TAG_INTERNAL);
 +                      port->internal_cmd_in_progress = 0;
 +                      return -EBUSY;
 +              }
 +
 +              /* Set the completion function and data for the command. */
 +              int_cmd->comp_data = &wait;
 +              int_cmd->comp_func = mtip_completion;
 +
 +      } else {
 +              /* Clear completion - we're going to poll */
 +              int_cmd->comp_data = NULL;
 +              int_cmd->comp_func = NULL;
 +      }
 +
 +      /* Copy the command to the command table */
 +      memcpy(int_cmd->command, fis, fisLen*4);
 +
 +      /* Populate the SG list */
 +      int_cmd->command_header->opts =
 +               cpu_to_le32(opts | fisLen);
 +      if (bufLen) {
 +              /* Single SG entry follows the FIS in the command table */
 +              command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
 +
 +              command_sg->info = cpu_to_le32((bufLen-1) & 0x3fffff);
 +              command_sg->dba = cpu_to_le32(buffer & 0xffffffff);
 +              command_sg->dba_upper = cpu_to_le32((buffer >> 16) >> 16);
 +
 +              /* One PRD entry (bit 16 of the command header options) */
 +              int_cmd->command_header->opts |= cpu_to_le32((1 << 16));
 +      }
 +
 +      /* Populate the command header */
 +      int_cmd->command_header->byte_count = 0;
 +
 +      /* Issue the command to the hardware */
 +      mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
 +
 +      /* Poll if atomic, wait_for_completion otherwise */
 +      if (atomic == GFP_KERNEL) {
 +              /* Wait for the command to complete or timeout. */
 +              if (wait_for_completion_timeout(
 +                              &wait,
 +                              msecs_to_jiffies(timeout)) == 0) {
 +                      dev_err(&port->dd->pdev->dev,
 +                              "Internal command did not complete [%d]\n",
 +                              atomic);
 +                      rv = -EAGAIN;
 +              }
 +
 +              if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 +                      & (1 << MTIP_TAG_INTERNAL)) {
 +                      dev_warn(&port->dd->pdev->dev,
 +                              "Retiring internal command but CI is 1.\n");
 +              }
 +
 +      } else {
 +              /* Spin for <timeout> checking if command still outstanding */
 +              /* NOTE(review): tight busy-wait; no cpu_relax() in the loop */
 +              timeout = jiffies + msecs_to_jiffies(timeout);
 +
 +              while ((readl(
 +                      port->cmd_issue[MTIP_TAG_INTERNAL])
 +                      & (1 << MTIP_TAG_INTERNAL))
 +                      && time_before(jiffies, timeout))
 +                      ;
 +
 +              if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 +                      & (1 << MTIP_TAG_INTERNAL)) {
 +                      dev_err(&port->dd->pdev->dev,
 +                              "Internal command did not complete [%d]\n",
 +                              atomic);
 +                      rv = -EAGAIN;
 +              }
 +      }
 +
 +      /* Clear the allocated and active bits for the internal command. */
 +      atomic_set(&int_cmd->active, 0);
 +      release_slot(port, MTIP_TAG_INTERNAL);
 +      port->internal_cmd_in_progress = 0;
 +
 +      return rv;
 +}
 +
 +/*
 + * Byte-swap ATA ID strings.
 + *
 + * ATA identify data contains strings in byte-swapped 16-bit words.
 + * They must be swapped (on all architectures) to be usable as C strings.
 + * This function swaps bytes in-place.
 + *
 + * @buf The buffer location of the string
 + * @len The number of bytes to swap
 + *
 + * return value
 + *    None
 + */
 +static inline void ata_swap_string(u16 *buf, unsigned int len)
 +{
 +      int i;
 +      for (i = 0; i < (len/2); i++)
 +              be16_to_cpus(&buf[i]);
 +}
 +
 +/*
 + * Request the device identity information.
 + *
 + * If a user space buffer is not specified, i.e. is NULL, the
 + * identify information is still read from the drive and placed
 + * into the identify data buffer (@e port->identify) in the
 + * port data structure.
 + * When the identify buffer contains valid identify information @e
 + * port->identify_valid is non-zero.
 + *
 + * @port       Pointer to the port structure.
 + * @user_buffer  A user space buffer where the identify data should be
 + *                    copied.
 + *
 + * return value
 + *    0       Command completed successfully.
 + *    -EFAULT An error occurred while coping data to the user buffer.
 + *    -1      Command failed.
 + */
 +static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
 +{
 +      int rv = 0;
 +      struct host_to_dev_fis fis;
 +
 +      down_write(&port->dd->internal_sem);
 +
 +      /* Build a register host-to-device FIS for IDENTIFY DEVICE. */
 +      memset(&fis, 0, sizeof(struct host_to_dev_fis));
 +      fis.type        = 0x27;
 +      fis.opts        = 1 << 7;
 +      fis.command     = ATA_CMD_ID_ATA;
 +
 +      /* Set the identify information as invalid. */
 +      port->identify_valid = 0;
 +
 +      /* Clear the identify information. */
 +      memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
 +
 +      /* Execute the command. */
 +      if (mtip_exec_internal_command(port,
 +                              &fis,
 +                              5,
 +                              port->identify_dma,
 +                              sizeof(u16) * ATA_ID_WORDS,
 +                              0,
 +                              GFP_KERNEL,
 +                              MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
 +                              < 0) {
 +              rv = -1;
 +              goto out;
 +      }
 +
 +      /*
 +       * Perform any necessary byte-swapping.  Yes, the kernel does in fact
 +       * perform field-sensitive swapping on the string fields.
 +       * See the kernel use of ata_id_string() for proof of this.
 +       */
 +#ifdef __LITTLE_ENDIAN
 +      ata_swap_string(port->identify + 27, 40);  /* model string*/
 +      ata_swap_string(port->identify + 23, 8);   /* firmware string*/
 +      ata_swap_string(port->identify + 10, 20);  /* serial# string*/
 +#else
 +      {
 +              /* Big endian hosts swap every identify word instead */
 +              int i;
 +              for (i = 0; i < ATA_ID_WORDS; i++)
 +                      port->identify[i] = le16_to_cpu(port->identify[i]);
 +      }
 +#endif
 +
 +      /* Set the identify buffer as valid. */
 +      port->identify_valid = 1;
 +
 +      if (user_buffer) {
 +              if (copy_to_user(
 +                      user_buffer,
 +                      port->identify,
 +                      ATA_ID_WORDS * sizeof(u16))) {
 +                      rv = -EFAULT;
 +                      goto out;
 +              }
 +      }
 +
 +out:
 +      up_write(&port->dd->internal_sem);
 +      return rv;
 +}
 +
 +/*
 + * Issue a standby immediate command to the device.
 + *
 + * Takes the internal command semaphore for the duration of the
 + * command, so this must only be called from sleepable context.
 + *
 + * @port Pointer to the port structure.
 + *
 + * return value
 + *    0       Command was executed successfully.
 + *    -1      An error occurred while executing the command.
 + */
 +static int mtip_standby_immediate(struct mtip_port *port)
 +{
 +      int rv;
 +      struct host_to_dev_fis  fis;
 +
 +      /* Serialize against other internal commands. */
 +      down_write(&port->dd->internal_sem);
 +
 +      /* Build the FIS. */
 +      memset(&fis, 0, sizeof(struct host_to_dev_fis));
 +      /* 0x27: Register FIS, host to device (SATA spec FIS type). */
 +      fis.type        = 0x27;
 +      /* Bit 7 set: this FIS carries a command (C bit). */
 +      fis.opts        = 1 << 7;
 +      fis.command     = ATA_CMD_STANDBYNOW1;
 +
 +      /* Execute the command.  Use a 15-second timeout for large drives. */
 +      rv = mtip_exec_internal_command(port,
 +                                      &fis,
 +                                      5,
 +                                      0,
 +                                      0,
 +                                      0,
 +                                      GFP_KERNEL,
 +                                      15000);
 +
 +      up_write(&port->dd->internal_sem);
 +
 +      return rv;
 +}
 +
 +/*
 + * Get the drive capacity.
 + *
 + * Assembles the 64-bit sector count from IDENTIFY data words 100-103
 + * (the 48-bit maximum LBA field — see ATA8-ACS; TODO confirm).
 + *
 + * NOTE(review): *sectors is written unconditionally, even when the
 + * identify data is invalid; callers must check the return value
 + * before trusting the reported capacity.
 + *
 + * @dd      Pointer to the device data structure.
 + * @sectors Pointer to the variable that will receive the sector count.
 + *
 + * return value
 + *    1 Capacity was returned successfully.
 + *    0 The identify information is invalid.
 + */
 +static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
 +{
 +      struct mtip_port *port = dd->port;
 +      u64 total, raw0, raw1, raw2, raw3;
 +      raw0 = port->identify[100];
 +      raw1 = port->identify[101];
 +      raw2 = port->identify[102];
 +      raw3 = port->identify[103];
 +      /* raw0..raw3 are u64, so the 32/48-bit shifts are well defined. */
 +      total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
 +      *sectors = total;
 +      return (bool) !!port->identify_valid;
 +}
 +
 +/*
 + * Reset the HBA.
 + *
 + * Resets the HBA by setting the HBA Reset bit in the Global
 + * HBA Control register. After setting the HBA Reset bit the
 + * function waits for 1 second before reading the HBA Reset
 + * bit to make sure it has cleared. If HBA Reset is not clear
 + * an error is returned. Cannot be used in non-blockable
 + * context (it sleeps via ssleep()).
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0  The reset was successful.
 + *    -1 The HBA Reset bit did not clear.
 + */
 +static int mtip_hba_reset(struct driver_data *dd)
 +{
 +      /* Quiesce the port before yanking the controller. */
 +      mtip_deinit_port(dd->port);
 +
 +      /* Set the reset bit */
 +      writel(HOST_RESET, dd->mmio + HOST_CTL);
 +
 +      /* Flush */
 +      readl(dd->mmio + HOST_CTL);
 +
 +      /* Wait for reset to clear */
 +      ssleep(1);
 +
 +      /* Check the bit has cleared (HOST_RESET is presumably
 +       * self-clearing once the HBA reset completes — per AHCI spec,
 +       * TODO confirm). */
 +      if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
 +              dev_err(&dd->pdev->dev,
 +                      "Reset bit did not clear.\n");
 +              return -1;
 +      }
 +
 +      return 0;
 +}
 +
 +/*
 + * Display the identify command data.
 + *
 + * Logs serial number, firmware revision, model, capacity, and card
 + * type (derived from the PCI revision ID). Does nothing if the
 + * identify data has not been successfully read yet.
 + *
 + * @port Pointer to the port data structure.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_dump_identify(struct mtip_port *port)
 +{
 +      sector_t sectors;
 +      unsigned short revid;
 +      /* 42 bytes: large enough for the 40-char model string + NUL. */
 +      char cbuf[42];
 +
 +      if (!port->identify_valid)
 +              return;
 +
 +      /* Serial number: identify words 10-19 (20 bytes + NUL). */
 +      strlcpy(cbuf, (char *)(port->identify+10), 21);
 +      dev_info(&port->dd->pdev->dev,
 +              "Serial No.: %s\n", cbuf);
 +
 +      /* Firmware revision: identify words 23-26 (8 bytes + NUL). */
 +      strlcpy(cbuf, (char *)(port->identify+23), 9);
 +      dev_info(&port->dd->pdev->dev,
 +              "Firmware Ver.: %s\n", cbuf);
 +
 +      /* Model: identify words 27-46 (40 bytes + NUL). */
 +      strlcpy(cbuf, (char *)(port->identify+27), 41);
 +      dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
 +
 +      if (mtip_hw_get_capacity(port->dd, &sectors))
 +              dev_info(&port->dd->pdev->dev,
 +                      "Capacity: %llu sectors (%llu MB)\n",
 +                       (u64)sectors,
 +                       ((u64)sectors) * ATA_SECT_SIZE >> 20);
 +
 +      /* NOTE(review): PCI_REVISION_ID is a single byte register but is
 +       * read here as a 16-bit word and masked below — confirm the
 +       * word read at this offset is safe on all platforms. */
 +      pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
 +      switch (revid & 0xff) {
 +      case 0x1:
 +              strlcpy(cbuf, "A0", 3);
 +              break;
 +      case 0x3:
 +              strlcpy(cbuf, "A2", 3);
 +              break;
 +      default:
 +              strlcpy(cbuf, "?", 2);
 +              break;
 +      }
 +      dev_info(&port->dd->pdev->dev,
 +              "Card Type: %s\n", cbuf);
 +}
 +
 +/*
 + * Map the commands scatter list into the command table.
 + *
 + * Each scatter entry is written as one mtip_cmd_sg descriptor:
 + * the DMA address plus a length-minus-one value in the low 22 bits
 + * of the info field (so the hardware limit is 4 MB per segment).
 + *
 + * @command Pointer to the command.
 + * @nents Number of scatter list entries.
 + *
 + * return value
 + *    None
 + */
 +static inline void fill_command_sg(struct driver_data *dd,
 +                              struct mtip_cmd *command,
 +                              int nents)
 +{
 +      int n;
 +      unsigned int dma_len;
 +      struct mtip_cmd_sg *command_sg;
 +      struct scatterlist *sg = command->sg;
 +
 +      /* SG descriptors live immediately after the command table header. */
 +      command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
 +
 +      for (n = 0; n < nents; n++) {
 +              dma_len = sg_dma_len(sg);
 +              /* Segments over 4 MB cannot be encoded; the length below
 +               * silently wraps via the 0x3fffff mask. */
 +              if (dma_len > 0x400000)
 +                      dev_err(&dd->pdev->dev,
 +                              "DMA segment length truncated\n");
 +              command_sg->info = cpu_to_le32((dma_len-1) & 0x3fffff);
 +#if (BITS_PER_LONG == 64)
 +              /* NOTE(review): single 64-bit store through a cast pointer;
 +               * assumes dba/dba_upper are adjacent and naturally aligned
 +               * in mtip_cmd_sg — confirm against the struct layout. */
 +              *((unsigned long *) &command_sg->dba) =
 +                       cpu_to_le64(sg_dma_address(sg));
 +#else
 +              command_sg->dba = cpu_to_le32(sg_dma_address(sg));
 +              command_sg->dba_upper   =
 +                       cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
 +#endif
 +              /* NOTE(review): sg++ assumes a flat (unchained) sg array;
 +               * sg_next() would be needed for chained lists — confirm
 +               * how command->sg is allocated. */
 +              command_sg++;
 +              sg++;
 +      }
 +}
 +
 +/*
 + * @brief Execute a drive command (HDIO_DRIVE_TASK style).
 + *
 + * On entry command[] holds the taskfile registers to send:
 + *   [0] command, [1] features, [2] sector count, [3] sector number,
 + *   [4] cylinder low, [5] cylinder high, [6] device/head.
 + * On successful return the completion registers are written back:
 + *   [0] status, [1] error, [4] cylinder low, [5] cylinder high.
 + *
 + * return value 0 The command completed successfully.
 + * return value -1 An error occurred while executing the command.
 + */
 +static int exec_drive_task(struct mtip_port *port, u8 *command)
 +{
 +      struct host_to_dev_fis  fis;
 +      /* Completion status is read from the received D2H register FIS. */
 +      struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
 +
 +      /* Lock the internal command semaphore. */
 +      down_write(&port->dd->internal_sem);
 +
 +      /* Build the FIS. */
 +      memset(&fis, 0, sizeof(struct host_to_dev_fis));
 +      fis.type        = 0x27;
 +      fis.opts        = 1 << 7;
 +      fis.command     = command[0];
 +      fis.features    = command[1];
 +      fis.sect_count  = command[2];
 +      fis.sector      = command[3];
 +      fis.cyl_low     = command[4];
 +      fis.cyl_hi      = command[5];
 +      fis.device      = command[6] & ~0x10; /* Clear the dev bit*/
 +
 +
 +      dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
 +              "nsect %x, sect %x, lcyl %x, "
 +              "hcyl %x, sel %x\n",
 +              __func__,
 +              command[0],
 +              command[1],
 +              command[2],
 +              command[3],
 +              command[4],
 +              command[5],
 +              command[6]);
 +
 +      /* Execute the command (non-data: no DMA buffer supplied). */
 +      if (mtip_exec_internal_command(port,
 +                               &fis,
 +                               5,
 +                               0,
 +                               0,
 +                               0,
 +                               GFP_KERNEL,
 +                               MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
 +              up_write(&port->dd->internal_sem);
 +              return -1;
 +      }
 +
 +      command[0] = reply->command; /* Status*/
 +      command[1] = reply->features; /* Error*/
 +      command[4] = reply->cyl_low;
 +      command[5] = reply->cyl_hi;
 +
 +      dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
 +              "err %x , cyl_lo %x cyl_hi %x\n",
 +              __func__,
 +              command[0],
 +              command[1],
 +              command[4],
 +              command[5]);
 +
 +      up_write(&port->dd->internal_sem);
 +      return 0;
 +}
 +
 +/*
 + * @brief Execute a drive command (HDIO_DRIVE_CMD style).
 + *
 + * On entry command[] holds: [0] command, [1] sector number (used as
 + * the LBA low byte for SMART), [2] features, [3] sector count.
 + * On return: [0] status, [1] error, [2] echoes the sector count.
 + *
 + * @param port Pointer to the port data structure.
 + * @param command Pointer to the user specified command parameters.
 + * @param user_buffer Pointer to the user space buffer where read sector
 + *                   data should be copied.
 + *
 + * return value 0 The command completed successfully.
 + * return value -EFAULT An error occurred while copying the completion
 + *                 data to the user space buffer.
 + * return value -1 An error occurred while executing the command.
 + */
 +static int exec_drive_command(struct mtip_port *port, u8 *command,
 +                              void __user *user_buffer)
 +{
 +      struct host_to_dev_fis  fis;
 +      struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
 +
 +      /* Lock the internal command semaphore. */
 +      down_write(&port->dd->internal_sem);
 +
 +      /* Build the FIS. */
 +      memset(&fis, 0, sizeof(struct host_to_dev_fis));
 +      fis.type                = 0x27;
 +      fis.opts                = 1 << 7;
 +      fis.command             = command[0];
 +      fis.features    = command[2];
 +      fis.sect_count  = command[3];
 +      if (fis.command == ATA_CMD_SMART) {
 +              /* SMART requires the magic LBA mid/high values 0x4f/0xc2. */
 +              fis.sector      = command[1];
 +              fis.cyl_low     = 0x4f;
 +              fis.cyl_hi      = 0xc2;
 +      }
 +
 +      dbg_printk(MTIP_DRV_NAME
 +              "%s: User Command: cmd %x, sect %x, "
 +              "feat %x, sectcnt %x\n",
 +              __func__,
 +              command[0],
 +              command[1],
 +              command[2],
 +              command[3]);
 +
 +      memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
 +
 +      /* Execute the command (at most one sector is DMA'd in). */
 +      if (mtip_exec_internal_command(port,
 +                              &fis,
 +                               5,
 +                               port->sector_buffer_dma,
 +                               (command[3] != 0) ? ATA_SECT_SIZE : 0,
 +                               0,
 +                               GFP_KERNEL,
 +                               MTIP_IOCTL_COMMAND_TIMEOUT_MS)
 +                               < 0) {
 +              up_write(&port->dd->internal_sem);
 +              return -1;
 +      }
 +
 +      /* Collect the completion status. */
 +      command[0] = reply->command; /* Status*/
 +      command[1] = reply->features; /* Error*/
 +      command[2] = command[3];
 +
 +      dbg_printk(MTIP_DRV_NAME
 +              "%s: Completion Status: stat %x, "
 +              "err %x, cmd %x\n",
 +              __func__,
 +              command[0],
 +              command[1],
 +              command[2]);
 +
 +      if (user_buffer && command[3]) {
 +              /* NOTE(review): only ATA_SECT_SIZE bytes were transferred
 +               * above, but up to ATA_SECT_SIZE * command[3] bytes are
 +               * copied out here — confirm sector_buffer is large enough
 +               * for command[3] sectors or this over-reads the buffer. */
 +              if (copy_to_user(user_buffer,
 +                               port->sector_buffer,
 +                               ATA_SECT_SIZE * command[3])) {
 +                      up_write(&port->dd->internal_sem);
 +                      return -EFAULT;
 +              }
 +      }
 +
 +      up_write(&port->dd->internal_sem);
 +      return 0;
 +}
 +
 +/*
 + *  Indicates whether a command has a single sector payload.
 + *
 + *  @command passed to the device to perform the certain event.
 + *  @features passed to the device to perform the certain event.
 + *
 + *  return value
 + *    1       command is one that always has a single sector payload,
 + *            regardless of the value in the Sector Count field.
 + *      0       otherwise
 + *
 + */
 +static unsigned int implicit_sector(unsigned char command,
 +                                  unsigned char features)
 +{
 +      unsigned int rv = 0;
 +
 +      /* list of commands that have an implicit sector count of 1
 +       * (security / media / SMART-style opcodes; some only for
 +       * specific feature values — TODO confirm names vs ATA spec) */
 +      switch (command) {
 +      case 0xF1:
 +      case 0xF2:
 +      case 0xF3:
 +      case 0xF4:
 +      case 0xF5:
 +      case 0xF6:
 +      case 0xE4:
 +      case 0xE8:
 +              rv = 1;
 +              break;
 +      case 0xF9:
 +              if (features == 0x03)
 +                      rv = 1;
 +              break;
 +      case 0xB0:
 +              if ((features == 0xD0) || (features == 0xD1))
 +                      rv = 1;
 +              break;
 +      case 0xB1:
 +              if ((features == 0xC2) || (features == 0xC3))
 +                      rv = 1;
 +              break;
 +      }
 +      return rv;
 +}
 +
 +/*
 + * Executes a taskfile (HDIO_DRIVE_TASKFILE).
 + * See ide_taskfile_ioctl() for derivation.
 + *
 + * @dd       Pointer to the driver data structure.
 + * @buf      User space base of the ioctl argument; the out-data
 + *           follows at offset @outtotal, in-data after that.
 + * @req_task Kernel copy of the user's ide_task_request_t.
 + * @outtotal Offset of the out-data within @buf.
 + *
 + * return value
 + *    0        Success.
 + *    -EINVAL  Bad size/phase.
 + *    -ENOMEM  Allocation or DMA mapping failure.
 + *    -EFAULT  User copy failure.
 + *    -EIO     Command execution failure.
 + */
 +static int exec_drive_taskfile(struct driver_data *dd,
 +                             void __user *buf,
 +                             ide_task_request_t *req_task,
 +                             int outtotal)
 +{
 +      struct host_to_dev_fis  fis;
 +      struct host_to_dev_fis *reply;
 +      u8 *outbuf = NULL;
 +      u8 *inbuf = NULL;
 +      dma_addr_t outbuf_dma = 0;
 +      dma_addr_t inbuf_dma = 0;
 +      dma_addr_t dma_buffer = 0;
 +      int err = 0;
 +      unsigned int taskin = 0;
 +      unsigned int taskout = 0;
 +      u8 nsect = 0;
 +      unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
 +      unsigned int force_single_sector;
 +      unsigned int transfer_size;
 +      unsigned long task_file_data;
 +      int intotal = outtotal + req_task->out_size;
 +
 +      taskout = req_task->out_size;
 +      taskin = req_task->in_size;
 +      /* 130560 = 512 * 0xFF: max transfer addressable by an 8-bit
 +       * sector count */
 +      if (taskin > 130560 || taskout > 130560) {
 +              err = -EINVAL;
 +              goto abort;
 +      }
 +
 +      if (taskout) {
 +              outbuf = kzalloc(taskout, GFP_KERNEL);
 +              if (outbuf == NULL) {
 +                      err = -ENOMEM;
 +                      goto abort;
 +              }
 +              if (copy_from_user(outbuf, buf + outtotal, taskout)) {
 +                      err = -EFAULT;
 +                      goto abort;
 +              }
 +              /* NOTE(review): mapping failure is detected by comparing
 +               * against 0; pci_dma_mapping_error() is the proper check
 +               * on platforms where 0 is a valid DMA address. */
 +              outbuf_dma = pci_map_single(dd->pdev,
 +                                       outbuf,
 +                                       taskout,
 +                                       DMA_TO_DEVICE);
 +              if (outbuf_dma == 0) {
 +                      err = -ENOMEM;
 +                      goto abort;
 +              }
 +              dma_buffer = outbuf_dma;
 +      }
 +
 +      if (taskin) {
 +              inbuf = kzalloc(taskin, GFP_KERNEL);
 +              if (inbuf == NULL) {
 +                      err = -ENOMEM;
 +                      goto abort;
 +              }
 +
 +              if (copy_from_user(inbuf, buf + intotal, taskin)) {
 +                      err = -EFAULT;
 +                      goto abort;
 +              }
 +              inbuf_dma = pci_map_single(dd->pdev,
 +                                       inbuf,
 +                                       taskin, DMA_FROM_DEVICE);
 +              if (inbuf_dma == 0) {
 +                      err = -ENOMEM;
 +                      goto abort;
 +              }
 +              /* If both directions were requested, the in-buffer wins;
 +               * only one DMA buffer is passed to the command below. */
 +              dma_buffer = inbuf_dma;
 +      }
 +
 +      /* only supports PIO and non-data commands from this ioctl. */
 +      switch (req_task->data_phase) {
 +      case TASKFILE_OUT:
 +              nsect = taskout / ATA_SECT_SIZE;
 +              reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
 +              break;
 +      case TASKFILE_IN:
 +              reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
 +              break;
 +      case TASKFILE_NO_DATA:
 +              reply = (dd->port->rxfis + RX_FIS_D2H_REG);
 +              break;
 +      default:
 +              err = -EINVAL;
 +              goto abort;
 +      }
 +
 +      /* Lock the internal command semaphore. */
 +      down_write(&dd->internal_sem);
 +
 +      /* Build the FIS. */
 +      memset(&fis, 0, sizeof(struct host_to_dev_fis));
 +
 +      fis.type        = 0x27;
 +      fis.opts        = 1 << 7;
 +      fis.command     = req_task->io_ports[7];
 +      fis.features    = req_task->io_ports[1];
 +      fis.sect_count  = req_task->io_ports[2];
 +      fis.lba_low     = req_task->io_ports[3];
 +      fis.lba_mid     = req_task->io_ports[4];
 +      fis.lba_hi      = req_task->io_ports[5];
 +       /* Clear the dev bit*/
 +      fis.device      = req_task->io_ports[6] & ~0x10;
 +
 +      /* out_flags bit 0 set: caller supplied 48-bit (HOB) registers. */
 +      if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
 +              req_task->in_flags.all  =
 +                      IDE_TASKFILE_STD_IN_FLAGS |
 +                      (IDE_HOB_STD_IN_FLAGS << 8);
 +              fis.lba_low_ex          = req_task->hob_ports[3];
 +              fis.lba_mid_ex          = req_task->hob_ports[4];
 +              fis.lba_hi_ex           = req_task->hob_ports[5];
 +              fis.features_ex         = req_task->hob_ports[1];
 +              fis.sect_cnt_ex         = req_task->hob_ports[2];
 +
 +      } else {
 +              req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
 +      }
 +
 +      force_single_sector = implicit_sector(fis.command, fis.features);
 +
 +      /* Data movement with a zero sector count: derive it from the
 +       * out-buffer size, or reject unless the command implies one
 +       * sector. */
 +      if ((taskin || taskout) && (!fis.sect_count)) {
 +              if (nsect)
 +                      fis.sect_count = nsect;
 +              else {
 +                      if (!force_single_sector) {
 +                              dev_warn(&dd->pdev->dev,
 +                                      "data movement but "
 +                                      "sect_count is 0\n");
 +                                      up_write(&dd->internal_sem);
 +                                      err = -EINVAL;
 +                                      goto abort;
 +                      }
 +              }
 +      }
 +
 +      dbg_printk(MTIP_DRV_NAME
 +              "taskfile: cmd %x, feat %x, nsect %x,"
 +              " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
 +              " head/dev %x\n",
 +              fis.command,
 +              fis.features,
 +              fis.sect_count,
 +              fis.lba_low,
 +              fis.lba_mid,
 +              fis.lba_hi,
 +              fis.device);
 +
 +      /* Per-command timeouts for known long-running operations. */
 +      switch (fis.command) {
 +      case 0x92: /* Change timeout for Download Microcode to 60 seconds.*/
 +              timeout = 60000;
 +              break;
 +      case 0xf4: /* Change timeout for Security Erase Unit to 4 minutes.*/
 +              timeout = 240000;
 +              break;
 +      case 0xe0: /* Change timeout for standby immediate to 10 seconds.*/
 +              timeout = 10000;
 +              break;
 +      case 0xf7: /* Change timeout for vendor unique command to 10 secs */
 +              timeout = 10000;
 +              break;
 +      case 0xfa: /* Change timeout for vendor unique command to 10 secs */
 +              timeout = 10000;
 +              break;
 +      default:
 +              timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
 +              break;
 +      }
 +
 +      /* Determine the correct transfer size.*/
 +      if (force_single_sector)
 +              transfer_size = ATA_SECT_SIZE;
 +      else
 +              transfer_size = ATA_SECT_SIZE * fis.sect_count;
 +
 +      /* Execute the command.*/
 +      if (mtip_exec_internal_command(dd->port,
 +                               &fis,
 +                               5,
 +                               dma_buffer,
 +                               transfer_size,
 +                               0,
 +                               GFP_KERNEL,
 +                               timeout) < 0) {
 +              up_write(&dd->internal_sem);
 +              err = -EIO;
 +              goto abort;
 +      }
 +
 +      task_file_data = readl(dd->port->mmio+PORT_TFDATA);
 +
 +      /* Pick the FIS that actually holds the completion registers:
 +       * PIO-in with BSY/ERR clear reports via the PIO setup FIS,
 +       * otherwise the D2H register FIS. */
 +      if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
 +              reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
 +              req_task->io_ports[7] = reply->control;
 +      } else {
 +              reply = dd->port->rxfis + RX_FIS_D2H_REG;
 +              req_task->io_ports[7] = reply->command;
 +      }
 +
 +      /* reclaim the DMA buffers; zero the handles so the abort path
 +       * does not unmap them a second time.*/
 +      if (inbuf_dma)
 +              pci_unmap_single(dd->pdev, inbuf_dma,
 +                      taskin, DMA_FROM_DEVICE);
 +      if (outbuf_dma)
 +              pci_unmap_single(dd->pdev, outbuf_dma,
 +                      taskout, DMA_TO_DEVICE);
 +      inbuf_dma  = 0;
 +      outbuf_dma = 0;
 +
 +      /* return the ATA registers to the caller.*/
 +      req_task->io_ports[1] = reply->features;
 +      req_task->io_ports[2] = reply->sect_count;
 +      req_task->io_ports[3] = reply->lba_low;
 +      req_task->io_ports[4] = reply->lba_mid;
 +      req_task->io_ports[5] = reply->lba_hi;
 +      req_task->io_ports[6] = reply->device;
 +
 +      if (req_task->out_flags.all & 1)  {
 +
 +              req_task->hob_ports[3] = reply->lba_low_ex;
 +              req_task->hob_ports[4] = reply->lba_mid_ex;
 +              req_task->hob_ports[5] = reply->lba_hi_ex;
 +              req_task->hob_ports[1] = reply->features_ex;
 +              req_task->hob_ports[2] = reply->sect_cnt_ex;
 +      }
 +
 +      /* COMRESET (port restart) after secure erase or low-level format */
 +      if (((fis.command == 0xF4) ||
 +              ((fis.command == 0xFC) &&
 +                      (fis.features == 0x27 || fis.features == 0x72 ||
 +                       fis.features == 0x62 || fis.features == 0x26))) &&
 +                       !(reply->command & 1)) {
 +              mtip_restart_port(dd->port);
 +      }
 +
 +      dbg_printk(MTIP_DRV_NAME
 +              "%s: Completion: stat %x,"
 +              "err %x, sect_cnt %x, lbalo %x,"
 +              "lbamid %x, lbahi %x, dev %x\n",
 +              __func__,
 +              req_task->io_ports[7],
 +              req_task->io_ports[1],
 +              req_task->io_ports[2],
 +              req_task->io_ports[3],
 +              req_task->io_ports[4],
 +              req_task->io_ports[5],
 +              req_task->io_ports[6]);
 +
 +      up_write(&dd->internal_sem);
 +
 +      if (taskout) {
 +              if (copy_to_user(buf + outtotal, outbuf, taskout)) {
 +                      err = -EFAULT;
 +                      goto abort;
 +              }
 +      }
 +      if (taskin) {
 +              if (copy_to_user(buf + intotal, inbuf, taskin)) {
 +                      err = -EFAULT;
 +                      goto abort;
 +              }
 +      }
 +abort:
 +      /* Common cleanup: unmap anything still mapped, free the bounce
 +       * buffers (kfree(NULL) is a no-op). */
 +      if (inbuf_dma)
 +              pci_unmap_single(dd->pdev, inbuf_dma,
 +                                      taskin, DMA_FROM_DEVICE);
 +      if (outbuf_dma)
 +              pci_unmap_single(dd->pdev, outbuf_dma,
 +                                      taskout, DMA_TO_DEVICE);
 +      kfree(outbuf);
 +      kfree(inbuf);
 +
 +      return err;
 +}
 +
 +/*
 + * Handle IOCTL calls from the Block Layer.
 + *
 + * This function is called by the Block Layer when it receives an IOCTL
 + * command that it does not understand. If the IOCTL command is not supported
 + * this function returns -ENOTTY.
 + *
 + * NOTE(review): the default case below actually returns -EINVAL, not
 + * the -ENOTTY documented here — confirm which is intended.
 + *
 + * @dd  Pointer to the driver data structure.
 + * @cmd IOCTL command passed from the Block Layer.
 + * @arg IOCTL argument passed from the Block Layer.
 + *
 + * return value
 + *    0       The IOCTL completed successfully.
 + *    -ENOTTY The specified command is not supported.
 + *    -EFAULT An error occurred copying data to a user space buffer.
 + *    -EIO    An error occurred while executing the command.
 + */
 +static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
 +                       unsigned long arg)
 +{
 +      switch (cmd) {
 +      case HDIO_GET_IDENTITY:
 +              if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
 +                      dev_warn(&dd->pdev->dev,
 +                              "Unable to read identity\n");
 +                      return -EIO;
 +              }
 +
 +              break;
 +      case HDIO_DRIVE_CMD:
 +      {
 +              /* 4 bytes: cmd, sector, features, sector count. */
 +              u8 drive_command[4];
 +
 +              /* Copy the user command info to our buffer. */
 +              if (copy_from_user(drive_command,
 +                                       (void __user *) arg,
 +                                       sizeof(drive_command)))
 +                      return -EFAULT;
 +
 +              /* Execute the drive command; any sector data read is
 +               * copied to user space just past the 4 command bytes. */
 +              if (exec_drive_command(dd->port,
 +                                       drive_command,
 +                                       (void __user *) (arg+4)))
 +                      return -EIO;
 +
 +              /* Copy the status back to the users buffer. */
 +              if (copy_to_user((void __user *) arg,
 +                                       drive_command,
 +                                       sizeof(drive_command)))
 +                      return -EFAULT;
 +
 +              break;
 +      }
 +      case HDIO_DRIVE_TASK:
 +      {
 +              /* 7 bytes: full taskfile register set. */
 +              u8 drive_command[7];
 +
 +              /* Copy the user command info to our buffer. */
 +              if (copy_from_user(drive_command,
 +                                       (void __user *) arg,
 +                                       sizeof(drive_command)))
 +                      return -EFAULT;
 +
 +              /* Execute the drive command. */
 +              if (exec_drive_task(dd->port, drive_command))
 +                      return -EIO;
 +
 +              /* Copy the status back to the users buffer. */
 +              if (copy_to_user((void __user *) arg,
 +                                       drive_command,
 +                                       sizeof(drive_command)))
 +                      return -EFAULT;
 +
 +              break;
 +      }
 +      case HDIO_DRIVE_TASKFILE: {
 +              ide_task_request_t req_task;
 +              int ret, outtotal;
 +
 +              if (copy_from_user(&req_task, (void __user *) arg,
 +                                      sizeof(req_task)))
 +                      return -EFAULT;
 +
 +              /* Data payload follows the request structure in user memory. */
 +              outtotal = sizeof(req_task);
 +
 +              ret = exec_drive_taskfile(dd, (void __user *) arg,
 +                                              &req_task, outtotal);
 +
 +              /* Always copy the (possibly updated) request back, even on
 +               * failure, so the caller sees the completion registers. */
 +              if (copy_to_user((void __user *) arg, &req_task, sizeof(req_task)))
 +                      return -EFAULT;
 +
 +              return ret;
 +      }
 +
 +      default:
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +/*
 + * Submit an IO to the hw
 + *
 + * This function is called by the block layer to issue an io
 + * to the device. Upon completion, the callback function will
 + * be called with the data parameter passed as the callback data.
 + *
 + * @dd       Pointer to the driver data structure.
 + * @start    First sector to read.
 + * @nsect    Number of sectors to read.
 + * @nents    Number of entries in scatter list for the read command.
 + * @tag      The tag of this read command.
 + * @callback Pointer to the function that should be called
 + *         when the read completes.
 + * @data     Callback data passed to the callback function
 + *         when the read completes.
 + * @barrier  If non-zero, this command must be completed before
 + *         issuing any other commands.
 + * @dir      Direction (read or write)
 + *
 + * return value
 + *    None
 + */
 +static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 +                            int nsect, int nents, int tag, void *callback,
 +                            void *data, int barrier, int dir)
 +{
 +      struct host_to_dev_fis  *fis;
 +      struct mtip_port *port = dd->port;
 +      struct mtip_cmd *command = &port->commands[tag];
 +
 +      /* Map the scatter list for DMA access */
 +      if (dir == READ)
 +              nents = dma_map_sg(&dd->pdev->dev, command->sg,
 +                                      nents, DMA_FROM_DEVICE);
 +      else
 +              nents = dma_map_sg(&dd->pdev->dev, command->sg,
 +                                      nents, DMA_TO_DEVICE);
 +
 +      /* dma_map_sg may coalesce entries; remember the mapped count. */
 +      command->scatter_ents = nents;
 +
 +      /*
 +       * The number of retries for this command before it is
 +       * reported as a failure to the upper layers.
 +       */
 +      command->retries = MTIP_MAX_RETRIES;
 +
 +      /* Fill out fis */
 +      fis = command->command;
 +      fis->type        = 0x27;
 +      fis->opts        = 1 << 7;
 +      fis->command     =
 +              (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
 +      /* 48-bit LBA split across the low and extended LBA fields.
 +       * NOTE(review): stored via unsigned-int casts over adjacent
 +       * byte fields — assumes little-endian layout; confirm. */
 +      *((unsigned int *) &fis->lba_low) = (start & 0xffffff);
 +      *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xffffff);
 +      fis->device      = 1 << 6;
 +      if (barrier)
 +              fis->device |= FUA_BIT;
 +      /* NCQ encoding: sector count goes in features/features_ex,
 +       * and the queue tag in bits 7:3 of sect_count. */
 +      fis->features    = nsect & 0xff;
 +      fis->features_ex = (nsect >> 8) & 0xff;
 +      fis->sect_count  = ((tag << 3) | (tag >> 5));
 +      fis->sect_cnt_ex = 0;
 +      fis->control     = 0;
 +      fis->res2        = 0;
 +      fis->res3        = 0;
 +      fill_command_sg(dd, command, nents);
 +
 +      /* Populate the command header: PRDT length, 5-dword CFIS,
 +       * prefetch enabled. */
 +      command->command_header->opts = cpu_to_le32(
 +                      (nents << 16) | 5 | AHCI_CMD_PREFETCH);
 +      command->command_header->byte_count = 0;
 +
 +      /*
 +       * Set the completion function and data for the command
 +       * within this layer.
 +       */
 +      command->comp_data = dd;
 +      command->comp_func = mtip_async_complete;
 +      command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 +
 +      /*
 +       * Set the completion function and data for the command passed
 +       * from the upper layer.
 +       */
 +      command->async_data = data;
 +      command->async_callback = callback;
 +
 +      /*
 +       * Lock used to prevent this command from being issued
 +       * if an internal command is in progress.
 +       */
 +      down_read(&port->dd->internal_sem);
 +
 +      /* Issue the command to the hardware */
 +      mtip_issue_ncq_command(port, tag);
 +
 +      /* Set the command's timeout value.*/
 +      port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
 +                                      MTIP_NCQ_COMMAND_TIMEOUT_MS);
 +
 +      up_read(&port->dd->internal_sem);
 +}
 +
 +/*
 + * Release a command slot.
 + *
 + * Thin wrapper around release_slot() for the device's single port;
 + * the counterpart of mtip_hw_get_scatterlist().
 + *
 + * @dd  Pointer to the driver data structure.
 + * @tag Slot tag
 + *
 + * return value
 + *      None
 + */
 +static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
 +{
 +      release_slot(dd->port, tag);
 +}
 +
 +/*
 + * Obtain a command slot and return its associated scatter list.
 + *
 + * @dd  Pointer to the driver data structure.
 + * @tag Pointer to an int that will receive the allocated command
 + *            slot tag.
 + *
 + * return value
 + *    Pointer to the scatter list for the allocated command slot
 + *    or NULL if no command slots are available.
 + */
 +static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 +                                                 int *tag)
 +{
 +      /*
 +       * It is possible that, even with this semaphore, a thread
 +       * may think that no command slots are available. Therefore, we
 +       * need to make an attempt to get_slot().
 +       */
 +      down(&dd->port->cmd_slot);
 +      *tag = get_slot(dd->port);
 +
 +      /* NOTE(review): on this failure path cmd_slot is not up()'d
 +       * here — confirm the release pairing with callers/release_slot
 +       * or this can leak a semaphore count. */
 +      if (unlikely(*tag < 0))
 +              return NULL;
 +
 +      return dd->port->commands[*tag].sg;
 +}
 +
 +/*
 + * Sysfs register/status dump.
 + *
 + * Dumps the per-slot-group SActive, Command Issue, allocation and
 + * completion bitmaps plus the port/host interrupt status registers.
 + *
 + * @dev  Pointer to the device structure, passed by the kernel.
 + * @attr Pointer to the device_attribute structure passed by the kernel.
 + * @buf  Pointer to the char buffer that will receive the stats info.
 + *
 + * return value
 + *    The size, in bytes, of the data copied into buf.
 + */
 +static ssize_t hw_show_registers(struct device *dev,
 +                              struct device_attribute *attr,
 +                              char *buf)
 +{
 +      u32 group_allocated;
 +      struct driver_data *dd = dev_to_disk(dev)->private_data;
 +      int size = 0;
 +      int n;
 +
 +      size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
 +
 +      for (n = 0; n < dd->slot_groups; n++)
 +              size += sprintf(&buf[size], "0x%08x\n",
 +                                       readl(dd->port->s_active[n]));
 +
 +      size += sprintf(&buf[size], "Command Issue:\n");
 +
 +      for (n = 0; n < dd->slot_groups; n++)
 +              size += sprintf(&buf[size], "0x%08x\n",
 +                                      readl(dd->port->cmd_issue[n]));
 +
 +      size += sprintf(&buf[size], "Allocated:\n");
 +
 +      for (n = 0; n < dd->slot_groups; n++) {
 +              /* On 64-bit, each allocated[] word holds two 32-slot
 +               * groups; pick the right half. */
 +              if (sizeof(long) > sizeof(u32))
 +                      group_allocated =
 +                              dd->port->allocated[n/2] >> (32*(n&1));
 +              else
 +                      group_allocated = dd->port->allocated[n];
 +              size += sprintf(&buf[size], "0x%08x\n",
 +                               group_allocated);
 +      }
 +
 +      size += sprintf(&buf[size], "completed:\n");
 +
 +      for (n = 0; n < dd->slot_groups; n++)
 +              size += sprintf(&buf[size], "0x%08x\n",
 +                              readl(dd->port->completed[n]));
 +
 +      size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
 +                              readl(dd->port->mmio + PORT_IRQ_STAT));
 +      size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
 +                              readl(dd->mmio + HOST_IRQ_STAT));
 +
 +      return size;
 +}
 +static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
 +
 +/*
 + * Create the sysfs related attributes.
 + *
 + * @dd   Pointer to the driver data structure.
 + * @kobj Pointer to the kobj for the block device.
 + *
 + * return value
 + *    0       Operation completed successfully.
 + *    -EINVAL Invalid parameter.
 + */
 +static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
 +{
 +      if (!kobj || !dd)
 +              return -EINVAL;
 +
 +      if (sysfs_create_file(kobj, &dev_attr_registers.attr))
 +              dev_warn(&dd->pdev->dev,
 +                      "Error creating registers sysfs entry\n");
 +      return 0;
 +}
 +
 +/*
 + * Remove the sysfs related attributes.
 + *
 + * @dd   Pointer to the driver data structure.
 + * @kobj Pointer to the kobj for the block device.
 + *
 + * return value
 + *    0       Operation completed successfully.
 + *    -EINVAL Invalid parameter.
 + */
 +static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
 +{
 +      if (!kobj || !dd)
 +              return -EINVAL;
 +
 +      sysfs_remove_file(kobj, &dev_attr_registers.attr);
 +
 +      return 0;
 +}
 +
 +/*
 + * Perform any init/resume time hardware setup
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    None
 + */
 +static inline void hba_setup(struct driver_data *dd)
 +{
 +      u32 hwdata;
 +      hwdata = readl(dd->mmio + HOST_HSORG);
 +
 +      /* interrupt bug workaround: use only 1 IS bit.*/
 +      writel(hwdata |
 +              HSORG_DISABLE_SLOTGRP_INTR |
 +              HSORG_DISABLE_SLOTGRP_PXIS,
 +              dd->mmio + HOST_HSORG);
 +}
 +
/*
 * Detect the details of the product, and store anything needed
 * into the driver data structure.  This includes product type and
 * version and number of slot groups.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static void mtip_detect_product(struct driver_data *dd)
{
	u32 hwdata;
	unsigned int rev, slotgroups;

	/*
	 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
	 * info register:
	 * [15:8] hardware/software interface rev#
	 * [   3] asic-style interface
	 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
	 */
	hwdata = readl(dd->mmio + HOST_HSORG);

	/* Defaults, used when the hardware is not recognized below. */
	dd->product_type = MTIP_PRODUCT_UNKNOWN;
	dd->slot_groups = 1;

	if (hwdata & 0x8) {	/* bit 3: asic-style interface (layout above) */
		dd->product_type = MTIP_PRODUCT_ASICFPGA;
		rev = (hwdata & HSORG_HWREV) >> 8;
		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
		dev_info(&dd->pdev->dev,
			"ASIC-FPGA design, HS rev 0x%x, "
			"%i slot groups [%i slots]\n",
			 rev,
			 slotgroups,
			 slotgroups * 32);

		/* Clamp to what the driver's static tables can hold. */
		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
			dev_warn(&dd->pdev->dev,
				"Warning: driver only supports "
				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
			slotgroups = MTIP_MAX_SLOT_GROUPS;
		}
		dd->slot_groups = slotgroups;
		return;
	}

	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}
 +
/*
 * Blocking wait for FTL rebuild to complete
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	0       FTL rebuild completed successfully
 *	-EFAULT FTL rebuild error/timeout/interruption
 */
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
	unsigned long timeout, cnt = 0, start;

	dev_warn(&dd->pdev->dev,
		"FTL rebuild in progress. Polling for completion.\n");

	start = jiffies;
	/* Cleared below on completion; still set after the loop means
	 * we timed out. */
	dd->ftlrebuildflag = 1;
	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);

	do {
#ifdef CONFIG_HOTPLUG
		if (mtip_check_surprise_removal(dd->pdev))
			return -EFAULT;
#endif
		if (mtip_get_identify(dd->port, NULL) < 0)
			return -EFAULT;

		/* The rebuild-in-progress magic lives in the identify data. */
		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
			MTIP_FTL_REBUILD_MAGIC) {
			ssleep(1);
			/*
			 * Periodic progress message.
			 * NOTE(review): each loop pass sleeps ~11 s (1 s
			 * here plus 10 s at the bottom of the loop), so
			 * 180 passes is roughly every 33 minutes, not the
			 * "3 minutes" the original comment claimed --
			 * confirm the intended interval.
			 */
			if (cnt++ >= 180) {
				dev_warn(&dd->pdev->dev,
				"FTL rebuild in progress (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
				cnt = 0;
			}
		} else {
			dev_warn(&dd->pdev->dev,
				"FTL rebuild complete (%d secs).\n",
			jiffies_to_msecs(jiffies - start) / 1000);
			dd->ftlrebuildflag = 0;
			break;
		}
		ssleep(10);
	} while (time_before(jiffies, timeout));

	/* Check for timeout */
	if (dd->ftlrebuildflag) {
		dev_err(&dd->pdev->dev,
		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
		jiffies_to_msecs(jiffies - start) / 1000);
		return -EFAULT;
	}

	return 0;
}
 +
/*
 * Called once for each card.
 *
 * Brings the protocol layer up: detects the product, allocates the
 * port structure and its DMA-coherent command/FIS memory, wires up
 * the per-slot command headers, resets the HBA, registers the IRQ
 * handler and the command timeout timer, and reads the identify data.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success, else an error code.
 */
static int mtip_hw_init(struct driver_data *dd)
{
	int i;
	int rv;
	unsigned int num_command_slots;

	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];

	mtip_detect_product(dd);
	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
		rv = -EIO;
		goto out1;
	}
	num_command_slots = dd->slot_groups * 32;

	hba_setup(dd);

	/*
	 * Initialize the internal semaphore
	 * Use a rw semaphore to enable prioritization of
	 * mgmnt ioctl traffic during heavy IO load
	 */
	init_rwsem(&dd->internal_sem);

	tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);

	dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
	if (!dd->port) {
		dev_err(&dd->pdev->dev,
			"Memory allocation: port structure\n");
		/*
		 * NOTE(review): returns directly instead of "rv = -ENOMEM;
		 * goto out1;". Harmless today (out1 only kfrees dd->port,
		 * which is NULL here), but inconsistent with the rest of
		 * the error handling; also the tasklet initialized above
		 * is not killed on any error path -- confirm.
		 */
		return -ENOMEM;
	}

	/* Counting semaphore to track command slot usage */
	sema_init(&dd->port->cmd_slot, num_command_slots - 1);

	/* Spinlock to prevent concurrent issue */
	spin_lock_init(&dd->port->cmd_issue_lock);

	/* Set the port mmio base address. */
	dd->port->mmio	= dd->mmio + PORT_OFFSET;
	dd->port->dd	= dd;

	/* Allocate memory for the command list. */
	dd->port->command_list =
		dmam_alloc_coherent(&dd->pdev->dev,
			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
			&dd->port->command_list_dma,
			GFP_KERNEL);
	if (!dd->port->command_list) {
		dev_err(&dd->pdev->dev,
			"Memory allocation: command list\n");
		rv = -ENOMEM;
		goto out1;
	}

	/* Clear the memory we have allocated. */
	memset(dd->port->command_list,
		0,
		HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));

	/*
	 * Carve the single coherent allocation up into its regions:
	 * command list, RX FIS area, command tables, identify data,
	 * and the internal sector buffer.
	 */
	/* Setup the address of the RX FIS. */
	dd->port->rxfis     = dd->port->command_list + HW_CMD_SLOT_SZ;
	dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;

	/* Setup the address of the command tables. */
	dd->port->command_table   = dd->port->rxfis + AHCI_RX_FIS_SZ;
	dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;

	/* Setup the address of the identify data. */
	dd->port->identify     = dd->port->command_table +
					HW_CMD_TBL_AR_SZ;
	dd->port->identify_dma = dd->port->command_tbl_dma +
					HW_CMD_TBL_AR_SZ;

	/* Setup the address of the sector buffer. */
	dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
	dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;

	/* Point the command headers at the command tables. */
	for (i = 0; i < num_command_slots; i++) {
		dd->port->commands[i].command_header =
					dd->port->command_list +
					(sizeof(struct mtip_cmd_hdr) * i);
		dd->port->commands[i].command_header_dma =
					dd->port->command_list_dma +
					(sizeof(struct mtip_cmd_hdr) * i);

		dd->port->commands[i].command =
			dd->port->command_table + (HW_CMD_TBL_SZ * i);
		dd->port->commands[i].command_dma =
			dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);

		/* Upper 32 bits of the table address, only meaningful
		 * when the HBA supports 64-bit DMA addressing. */
		if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
			dd->port->commands[i].command_header->ctbau =
			cpu_to_le32(
			(dd->port->commands[i].command_dma >> 16) >> 16);
		dd->port->commands[i].command_header->ctba = cpu_to_le32(
			dd->port->commands[i].command_dma & 0xffffffff);

		/*
		 * If this is not done, a bug is reported by the stock
		 * FC11 i386. Due to the fact that it has lots of kernel
		 * debugging enabled.
		 */
		sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);

		/* Mark all commands as currently inactive.*/
		atomic_set(&dd->port->commands[i].active, 0);
	}

	/* Setup the pointers to the extended s_active and CI registers. */
	for (i = 0; i < dd->slot_groups; i++) {
		dd->port->s_active[i] =
			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
		dd->port->cmd_issue[i] =
			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
		dd->port->completed[i] =
			dd->port->mmio + i*0x80 + PORT_SDBV;
	}

	/* Reset the HBA. */
	if (mtip_hba_reset(dd) < 0) {
		dev_err(&dd->pdev->dev,
			"Card did not reset within timeout\n");
		rv = -EIO;
		goto out2;
	}

	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Setup the ISR and enable interrupts. */
	rv = devm_request_irq(&dd->pdev->dev,
				dd->pdev->irq,
				mtip_irq_handler,
				IRQF_SHARED,
				dev_driver_string(&dd->pdev->dev),
				dd);

	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate IRQ %d\n", dd->pdev->irq);
		goto out2;
	}

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);

	/* Start the periodic command timeout checker. */
	init_timer(&dd->port->cmd_timer);
	dd->port->cmd_timer.data = (unsigned long int) dd->port;
	dd->port->cmd_timer.function = mtip_timeout_function;
	mod_timer(&dd->port->cmd_timer,
		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));

	if (mtip_get_identify(dd->port, NULL) < 0) {
		rv = -EFAULT;
		goto out3;
	}
	mtip_dump_identify(dd->port);

	/* If the drive is still rebuilding its FTL, block until done. */
	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
		MTIP_FTL_REBUILD_MAGIC) {
		return mtip_ftl_rebuild_poll(dd);
	}
	return rv;

out3:
	del_timer_sync(&dd->port->cmd_timer);

	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);

	/*Release the IRQ. */
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);

out2:
	mtip_deinit_port(dd->port);

	/* Free the command/command header memory. */
	dmam_free_coherent(&dd->pdev->dev,
				HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
				dd->port->command_list,
				dd->port->command_list_dma);
out1:
	/* Free the memory allocated for the port structure. */
	kfree(dd->port);

	return rv;
}
 +
/*
 * Called to deinitialize an interface.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_exit(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	if (atomic_read(&dd->drv_cleanup_done) != true) {
		/* Hardware teardown is skipped when cleanup already ran
		 * (drv_cleanup_done is set elsewhere; presumably on the
		 * surprise-removal path, where the device can no longer
		 * be accessed -- confirm against the producers). */

		mtip_standby_immediate(dd->port);

		/* de-initialize the port. */
		mtip_deinit_port(dd->port);

		/* Disable interrupts on the HBA. */
		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
				dd->mmio + HOST_CTL);
	}

	del_timer_sync(&dd->port->cmd_timer);

	/* Stop the bottom half tasklet. */
	tasklet_kill(&dd->tasklet);

	/* Release the IRQ. */
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);

	/* Free the command/command header memory. */
	dmam_free_coherent(&dd->pdev->dev,
			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
			dd->port->command_list,
			dd->port->command_list_dma);
	/* Free the memory allocated for the port structure. */
	kfree(dd->port);

	return 0;
}
 +
/*
 * Issue a Standby Immediate command to the device.
 *
 * This function is called by the Block Layer just before the
 * system powers off during a shutdown.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_shutdown(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 *
	 * NOTE(review): the result of mtip_standby_immediate() is
	 * discarded -- presumably deliberate since the machine is
	 * going down either way; confirm.
	 */
	mtip_standby_immediate(dd->port);

	return 0;
}
 +
 +/*
 + * Suspend function
 + *
 + * This function is called by the Block Layer just before the
 + * system hibernates.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0       Suspend was successful
 + *    -EFAULT Suspend was not successful
 + */
 +static int mtip_hw_suspend(struct driver_data *dd)
 +{
 +      /*
 +       * Send standby immediate (E0h) to the drive
 +       * so that it saves its state.
 +       */
 +      if (mtip_standby_immediate(dd->port) != 0) {
 +              dev_err(&dd->pdev->dev,
 +                      "Failed standby-immediate command\n");
 +              return -EFAULT;
 +      }
 +
 +      /* Disable interrupts on the HBA.*/
 +      writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
 +                      dd->mmio + HOST_CTL);
 +      mtip_deinit_port(dd->port);
 +
 +      return 0;
 +}
 +
 +/*
 + * Resume function
 + *
 + * This function is called by the Block Layer as the
 + * system resumes.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0       Resume was successful
 + *      -EFAULT Resume was not successful
 + */
 +static int mtip_hw_resume(struct driver_data *dd)
 +{
 +      /* Perform any needed hardware setup steps */
 +      hba_setup(dd);
 +
 +      /* Reset the HBA */
 +      if (mtip_hba_reset(dd) != 0) {
 +              dev_err(&dd->pdev->dev,
 +                      "Unable to reset the HBA\n");
 +              return -EFAULT;
 +      }
 +
 +      /*
 +       * Enable the port, DMA engine, and FIS reception specific
 +       * h/w in controller.
 +       */
 +      mtip_init_port(dd->port);
 +      mtip_start_port(dd->port);
 +
 +      /* Enable interrupts on the HBA.*/
 +      writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
 +                      dd->mmio + HOST_CTL);
 +
 +      return 0;
 +}
 +
/*
 * Helper function for reusing disk name
 * upon hot insertion.
 *
 * Formats "<prefix><letters>" into buf, where the letters encode
 * 'index' in a bijective base-26 scheme (0 -> "a", 25 -> "z",
 * 26 -> "aa", ...), the same naming rule sd.c uses for scsi disks.
 *
 * return value
 *	0       on success
 *	-EINVAL if buf is too small for the formatted name
 */
static int rssd_disk_name_format(char *prefix,
				 int index,
				 char *buf,
				 int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *name_start = buf + strlen(prefix);
	char *limit = buf + buflen;
	char *cursor = limit - 1;

	/* Build the letter suffix backwards from the end of buf. */
	*cursor = '\0';
	for (;;) {
		if (cursor == name_start)
			return -EINVAL;	/* no room for another letter */
		*--cursor = 'a' + (index % radix);
		if (index < radix)
			break;
		index = index / radix - 1;	/* bijective base-26 step */
	}

	/* Slide the suffix up against the prefix, then write the prefix. */
	memmove(name_start, cursor, limit - cursor);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
 +
 +/*
 + * Block layer IOCTL handler.
 + *
 + * @dev Pointer to the block_device structure.
 + * @mode ignored
 + * @cmd IOCTL command passed from the user application.
 + * @arg Argument passed from the user application.
 + *
 + * return value
 + *    0        IOCTL completed successfully.
 + *    -ENOTTY  IOCTL not supported or invalid driver data
 + *                 structure pointer.
 + */
 +static int mtip_block_ioctl(struct block_device *dev,
 +                          fmode_t mode,
 +                          unsigned cmd,
 +                          unsigned long arg)
 +{
 +      struct driver_data *dd = dev->bd_disk->private_data;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EACCES;
 +
 +      if (!dd)
 +              return -ENOTTY;
 +
 +      switch (cmd) {
 +      case BLKFLSBUF:
 +              return 0;
 +      default:
 +              return mtip_hw_ioctl(dd, cmd, arg);
 +      }
 +}
 +
#ifdef CONFIG_COMPAT
/*
 * Block layer compat IOCTL handler.
 *
 * @dev Pointer to the block_device structure.
 * @mode ignored
 * @cmd IOCTL command passed from the user application.
 * @arg Argument passed from the user application.
 *
 * return value
 *	0        IOCTL completed successfully.
 *	-ENOTTY  IOCTL not supported or invalid driver data
 *	             structure pointer.
 */
static int mtip_block_compat_ioctl(struct block_device *dev,
			    fmode_t mode,
			    unsigned cmd,
			    unsigned long arg)
{
	struct driver_data *dd = dev->bd_disk->private_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!dd)
		return -ENOTTY;

	switch (cmd) {
	case BLKFLSBUF:
		return 0;
	case HDIO_DRIVE_TASKFILE: {
		/*
		 * This aliases the userspace buffer in 'arg' and is only
		 * dereferenced through get_user()/put_user(); declare it
		 * __user so sparse can verify that (the original
		 * declaration was missing the annotation even though it
		 * was assigned a __user cast).
		 */
		struct mtip_compat_ide_task_request_s __user *compat_req_task;
		ide_task_request_t req_task;
		int compat_tasksize, outtotal, ret;

		compat_tasksize = sizeof(struct mtip_compat_ide_task_request_s);

		compat_req_task =
			(struct mtip_compat_ide_task_request_s __user *) arg;

		/* Copy the layout-compatible leading fields wholesale,
		 * minus the two trailing compat_long_t size fields. */
		if (copy_from_user(&req_task, (void __user *) arg,
				compat_tasksize - (2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (get_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (get_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		outtotal = sizeof(struct mtip_compat_ide_task_request_s);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		/* Propagate the (possibly updated) request back to user. */
		if (copy_to_user((void __user *) arg, &req_task,
				compat_tasksize -
				(2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (put_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (put_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		return ret;
	}
	default:
		return mtip_hw_ioctl(dd, cmd, arg);
	}
}
#endif
 +
/*
 * Obtain the geometry of the device.
 *
 * You may think that this function is obsolete, but some applications,
 * fdisk for example still used CHS values. This function describes the
 * device as having 224 heads and 56 sectors per cylinder. These values are
 * chosen so that each cylinder is aligned on a 4KB boundary. Since a
 * partition is described in terms of a start and end cylinder this means
 * that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affects performance.
 *
 * @dev Pointer to the block_device structure.
 * @geo Pointer to a hd_geometry structure.
 *
 * return value
 *	0       Operation completed successfully.
 *	-ENOTTY An error occurred while reading the drive capacity.
 */
static int mtip_block_getgeo(struct block_device *dev,
				struct hd_geometry *geo)
{
	struct driver_data *dd = dev->bd_disk->private_data;
	sector_t capacity;

	if (!dd)
		return -ENOTTY;

	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not get drive capacity.\n");
		return -ENOTTY;
	}

	geo->heads = 224;
	geo->sectors = 56;
#if BITS_PER_LONG == 64
	geo->cylinders = capacity / (geo->heads * geo->sectors);
#else
	/* 32-bit kernels: sector_t may be 64-bit, so use the do_div()
	 * helper instead of a plain '/' (which would need __udivdi3). */
	do_div(capacity, (geo->heads * geo->sectors));
	geo->cylinders = capacity;
#endif
	return 0;
}
 +
/*
 * Block device operation function.
 *
 * This structure contains pointers to the functions required by the block
 * layer.
 */
static const struct block_device_operations mtip_block_ops = {
	.ioctl		= mtip_block_ioctl,	/* admin/pass-through ioctls */
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtip_block_compat_ioctl, /* 32-bit ioctls on 64-bit */
#endif
	.getgeo		= mtip_block_getgeo,	/* legacy CHS geometry */
	.owner		= THIS_MODULE
};
 +
/*
 * Block layer make request function.
 *
 * This function is called by the kernel to process a BIO for
 * the P320 device.
 *
 * @queue Pointer to the request queue. Unused other than to obtain
 *              the driver data structure.
 * @bio   Pointer to the BIO.
 *
 * return value
 *	None (the function is void; success and failure are reported
 *	to the submitter via bio_endio()/bio_io_error()).
 */
static void mtip_make_request(struct request_queue *queue, struct bio *bio)
{
	struct driver_data *dd = queue->queuedata;
	struct scatterlist *sg;
	struct bio_vec *bvec;
	int nents = 0;
	int tag = 0;

	/* Data-less bios complete immediately with success. */
	if (unlikely(!bio_has_data(bio))) {
		/* NOTE(review): blk_queue_flush() normally *configures*
		 * the queue's flush capability at setup time; calling it
		 * here per-bio looks suspicious -- confirm intent. */
		blk_queue_flush(queue, 0);
		bio_endio(bio, 0);
		return;
	}

	/* Fail fast while error handling (port recovery) is active. */
	if (unlikely(atomic_read(&dd->eh_active))) {
		bio_endio(bio, -EBUSY);
		return;
	}

	sg = mtip_hw_get_scatterlist(dd, &tag);
	if (likely(sg != NULL)) {
		blk_queue_bounce(queue, &bio);

		if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
			dev_warn(&dd->pdev->dev,
				"Maximum number of SGL entries exceeded");
			bio_io_error(bio);
			mtip_hw_release_scatterlist(dd, tag);
			return;
		}

		/* Create the scatter list for this bio. */
		bio_for_each_segment(bvec, bio, nents) {
			sg_set_page(&sg[nents],
					bvec->bv_page,
					bvec->bv_len,
					bvec->bv_offset);
		}

		/* Issue the read/write. */
		mtip_hw_submit_io(dd,
				bio->bi_sector,
				bio_sectors(bio),
				nents,
				tag,
				bio_endio,
				bio,
				bio->bi_rw & REQ_FLUSH,
				bio_data_dir(bio));
	} else {
		/* No command slot available: fail the bio. */
		bio_io_error(bio);
	}
}
 +
/*
 * Block layer initialization function.
 *
 * This function is called once by the PCI layer for each P320
 * device that is connected to the system.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_block_initialize(struct driver_data *dd)
{
	int rv = 0;
	sector_t capacity;
	unsigned int index = 0;
	struct kobject *kobj;

	/* Initialize the protocol layer. */
	rv = mtip_hw_init(dd);
	if (rv < 0) {
		dev_err(&dd->pdev->dev,
			"Protocol layer initialization failed\n");
		rv = -EINVAL;
		goto protocol_init_error;
	}

	/* Allocate the request queue. */
	dd->queue = blk_alloc_queue(GFP_KERNEL);
	if (dd->queue == NULL) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		rv = -ENOMEM;
		goto block_queue_alloc_init_error;
	}

	/* Attach our request function to the request queue. */
	blk_queue_make_request(dd->queue, mtip_make_request);

	/* Set device limits. */
	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
	blk_queue_physical_block_size(dd->queue, 4096);
	blk_queue_io_min(dd->queue, 4096);

	dd->disk = alloc_disk(MTIP_MAX_MINORS);
	if (dd->disk  == NULL) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate gendisk structure\n");
		rv = -EINVAL;
		goto alloc_disk_error;
	}

	/* Generate the disk name, implemented same as in sd.c */
	do {
		if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
			goto ida_get_error;

		spin_lock(&rssd_index_lock);
		rv = ida_get_new(&rssd_index_ida, &index);
		spin_unlock(&rssd_index_lock);
	} while (rv == -EAGAIN);

	if (rv)
		goto ida_get_error;

	rv = rssd_disk_name_format("rssd",
				index,
				dd->disk->disk_name,
				DISK_NAME_LEN);
	if (rv)
		goto disk_index_error;

	dd->disk->driverfs_dev	= &dd->pdev->dev;
	dd->disk->major		= dd->major;
	dd->disk->first_minor	= dd->instance * MTIP_MAX_MINORS;
	dd->disk->fops		= &mtip_block_ops;
	dd->disk->queue		= dd->queue;
	dd->disk->private_data	= dd;
	dd->queue->queuedata	= dd;
	dd->index		= index;

	/* Set the capacity of the device in 512 byte sectors. */
	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not read drive capacity\n");
		rv = -EIO;
		goto read_capacity_error;
	}
	set_capacity(dd->disk, capacity);

	/* Enable the block device and add it to /dev */
	add_disk(dd->disk);

	/*
	 * Now that the disk is active, initialize any sysfs attributes
	 * managed by the protocol layer.
	 */
	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
	if (kobj) {
		mtip_hw_sysfs_init(dd, kobj);
		kobject_put(kobj);
	}

	return rv;

read_capacity_error:
	/*
	 * Delete our gendisk structure. This also removes the device
	 * from /dev
	 *
	 * NOTE(review): this label is only reached *before* add_disk()
	 * has been called, yet it invokes del_gendisk() on a disk that
	 * was never added -- confirm whether put_disk() alone was
	 * intended here.
	 */
	del_gendisk(dd->disk);

disk_index_error:
	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, index);
	spin_unlock(&rssd_index_lock);

ida_get_error:
	put_disk(dd->disk);

alloc_disk_error:
	blk_cleanup_queue(dd->queue);

block_queue_alloc_init_error:
	/* De-initialize the protocol layer. */
	mtip_hw_exit(dd);

protocol_init_error:
	return rv;
}
 +
 +/*
 + * Block layer deinitialization function.
 + *
 + * Called by the PCI layer as each P320 device is removed.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0
 + */
 +static int mtip_block_remove(struct driver_data *dd)
 +{
 +      struct kobject *kobj;
 +      /* Clean up the sysfs attributes managed by the protocol layer. */
 +      kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
 +      if (kobj) {
 +              mtip_hw_sysfs_exit(dd, kobj);
 +              kobject_put(kobj);
 +      }
 +
 +      /*
 +       * Delete our gendisk structure. This also removes the device
 +       * from /dev
 +       */
 +      del_gendisk(dd->disk);
 +      blk_cleanup_queue(dd->queue);
 +      dd->disk  = NULL;
 +      dd->queue = NULL;
 +
 +      /* De-initialize the protocol layer. */
 +      mtip_hw_exit(dd);
 +
 +      return 0;
 +}
 +
 +/*
 + * Function called by the PCI layer when just before the
 + * machine shuts down.
 + *
 + * If a protocol layer shutdown function is present it will be called
 + * by this function.
 + *
 + * @dd Pointer to the driver data structure.
 + *
 + * return value
 + *    0
 + */
 +static int mtip_block_shutdown(struct driver_data *dd)
 +{
 +      dev_info(&dd->pdev->dev,
 +              "Shutting down %s ...\n", dd->disk->disk_name);
 +
 +      /* Delete our gendisk structure, and cleanup the blk queue. */
 +      del_gendisk(dd->disk);
 +      blk_cleanup_queue(dd->queue);
 +      dd->disk  = NULL;
 +      dd->queue = NULL;
 +
 +      mtip_hw_shutdown(dd);
 +      return 0;
 +}
 +
/*
 * Suspend the block layer interface for this card.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	/* NOTE(review): mtip_hw_suspend() can fail (-EFAULT) but the
	 * result is discarded here -- confirm this is intentional. */
	mtip_hw_suspend(dd);
	return 0;
}
 +
 +static int mtip_block_resume(struct driver_data *dd)
 +{
 +      dev_info(&dd->pdev->dev, "Resuming %s ...\n",
 +              dd->disk->disk_name);
 +      mtip_hw_resume(dd);
 +      return 0;
 +}
 +
 +/*
 + * Called for each supported PCI device detected.
 + *
 + * This function allocates the private data structure, enables the
 + * PCI device and then calls the block layer initialization function.
 + *
 + * return value
 + *    0 on success else an error code.
 + */
 +static int mtip_pci_probe(struct pci_dev *pdev,
 +                      const struct pci_device_id *ent)
 +{
 +      int rv = 0;
 +      struct driver_data *dd = NULL;
 +
 +      /* Allocate memory for this devices private data. */
 +      dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
 +      if (dd == NULL) {
 +              dev_err(&pdev->dev,
 +                      "Unable to allocate memory for driver data\n");
 +              return -ENOMEM;
 +      }
 +
 +      /* Set the atomic variable as 1 in case of SRSI */
 +      atomic_set(&dd->drv_cleanup_done, true);
 +
 +      atomic_set(&dd->resumeflag, false);
 +      atomic_set(&dd->eh_active, 0);
 +
 +      /* Attach the private data to this PCI device.  */
 +      pci_set_drvdata(pdev, dd);
 +
 +      rv = pcim_enable_device(pdev);
 +      if (rv < 0) {
 +              dev_err(&pdev->dev, "Unable to enable device\n");
 +              goto iomap_err;
 +      }
 +
 +      /* Map BAR5 to memory. */
 +      rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
 +      if (rv < 0) {
 +              dev_err(&pdev->dev, "Unable to map regions\n");
 +              goto iomap_err;
 +      }
 +
 +      if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 +              rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 +
 +              if (rv) {
 +                      rv = pci_set_consistent_dma_mask(pdev,
 +                                              DMA_BIT_MASK(32));
 +                      if (rv) {
 +                              dev_warn(&pdev->dev,
 +                                      "64-bit DMA enable failed\n");
 +                              goto setmask_err;
 +                      }
 +              }
 +      }
 +
 +      pci_set_master(pdev);
 +
 +      if (pci_enable_msi(pdev)) {
 +              dev_warn(&pdev->dev,
 +                      "Unable to enable MSI interrupt.\n");
 +              goto block_initialize_err;
 +      }
 +
 +      /* Copy the info we may need later into the private data structure. */
 +      dd->major       = mtip_major;
 +      dd->protocol    = ent->driver_data;
 +      dd->instance    = instance;
 +      dd->pdev        = pdev;
 +
 +      /* Initialize the block layer. */
 +      rv = mtip_block_initialize(dd);
 +      if (rv < 0) {
 +              dev_err(&pdev->dev,
 +                      "Unable to initialize block layer\n");
 +              goto block_initialize_err;
 +      }
 +
 +      /*
 +       * Increment the instance count so that each device has a unique
 +       * instance number.
 +       */
 +      instance++;
 +
 +      goto done;
 +
 +block_initialize_err:
 +      pci_disable_msi(pdev);
 +
 +setmask_err:
 +      pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
 +
 +iomap_err:
 +      kfree(dd);
 +      pci_set_drvdata(pdev, NULL);
 +      return rv;
 +done:
 +      /* Set the atomic variable as 0 in case of SRSI */
 +      atomic_set(&dd->drv_cleanup_done, true);
 +
 +      return rv;
 +}
 +
 +/*
 + * Called for each probed device when the device is removed or the
 + * driver is unloaded.
 + *
 + * return value
 + *    None
 + */
 +static void mtip_pci_remove(struct pci_dev *pdev)
 +{
 +      struct driver_data *dd = pci_get_drvdata(pdev);
 +      int counter = 0;
 +
 +      if (mtip_check_surprise_removal(pdev)) {
 +              while (atomic_read(&dd->drv_cleanup_done) == false) {
 +                      counter++;
 +                      msleep(20);
 +                      if (counter == 10) {
 +                              /* Cleanup the outstanding commands */
 +                              mtip_command_cleanup(dd);
 +                              break;
 +                      }
 +              }
 +      }
 +      /* Set the atomic variable as 1 in case of SRSI */
 +      atomic_set(&dd->drv_cleanup_done, true);
 +
 +      /* Clean up the block layer. */
 +      mtip_block_remove(dd);
 +
 +      pci_disable_msi(pdev);
 +
 +      kfree(dd);
 +      pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
 +}
 +
 +/*
 + * Called for each probed device when the device is suspended.
 + *
 + * return value
 + *    0  Success
 + *    <0 Error
 + */
 +static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 +{
 +      int rv = 0;
 +      struct driver_data *dd = pci_get_drvdata(pdev);
 +
 +      if (!dd) {
 +              dev_err(&pdev->dev,
 +                      "Driver private datastructure is NULL\n");
 +              return -EFAULT;
 +      }
 +
 +      atomic_set(&dd->resumeflag, true);
 +
 +      /* Disable ports & interrupts then send standby immediate */
 +      rv = mtip_block_suspend(dd);
 +      if (rv < 0) {
 +              dev_err(&pdev->dev,
 +                      "Failed to suspend controller\n");
 +              return rv;
 +      }
 +
 +      /*
 +       * Save the pci config space to pdev structure &
 +       * disable the device
 +       */
 +      pci_save_state(pdev);
 +      pci_disable_device(pdev);
 +
 +      /* Move to Low power state*/
 +      pci_set_power_state(pdev, PCI_D3hot);
 +
 +      return rv;
 +}
 +
 +/*
 + * Called for each probed device when the device is resumed.
 + *
 + * return value
 + *      0  Success
 + *      <0 Error
 + */
 +static int mtip_pci_resume(struct pci_dev *pdev)
 +{
 +      int rv = 0;
 +      struct driver_data *dd;
 +
 +      dd = pci_get_drvdata(pdev);
 +      if (!dd) {
 +              dev_err(&pdev->dev,
 +                      "Driver private datastructure is NULL\n");
 +              return -EFAULT;
 +      }
 +
 +      /* Move the device to active State */
 +      pci_set_power_state(pdev, PCI_D0);
 +
 +      /* Restore PCI configuration space */
 +      pci_restore_state(pdev);
 +
 +      /* Enable the PCI device*/
 +      rv = pcim_enable_device(pdev);
 +      if (rv < 0) {
 +              dev_err(&pdev->dev,
 +                      "Failed to enable card during resume\n");
 +              goto err;
 +      }
 +      pci_set_master(pdev);
 +
 +      /*
 +       * Calls hbaReset, initPort, & startPort function
 +       * then enables interrupts
 +       */
 +      rv = mtip_block_resume(dd);
 +      if (rv < 0)
 +              dev_err(&pdev->dev, "Unable to resume\n");
 +
 +err:
 +      atomic_set(&dd->resumeflag, false);
 +
 +      return rv;
 +}
 +
 +/*
 + * Shutdown routine
 + *
 + * return value
 + *      None
 + */
 +static void mtip_pci_shutdown(struct pci_dev *pdev)
 +{
 +      struct driver_data *dd = pci_get_drvdata(pdev);
 +      if (dd)
 +              mtip_block_shutdown(dd);
 +}
 +
 +/* Table of device ids supported by this driver. */
 +static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
 +      {  PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
 +      { 0 }
 +};
 +
 +/* Structure that describes the PCI driver functions. */
 +static struct pci_driver mtip_pci_driver = {
 +      .name                   = MTIP_DRV_NAME,
 +      .id_table               = mtip_pci_tbl,
 +      .probe                  = mtip_pci_probe,
 +      .remove                 = mtip_pci_remove,
 +      .suspend                = mtip_pci_suspend,
 +      .resume                 = mtip_pci_resume,
 +      .shutdown               = mtip_pci_shutdown,
 +};
 +
 +MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
 +
 +/*
 + * Module initialization function.
 + *
 + * Called once when the module is loaded. This function allocates a major
 + * block device number to the Cyclone devices and registers the PCI layer
 + * of the driver.
 + *
 + * Return value
 + *      0 on success else error code.
 + */
 +static int __init mtip_init(void)
 +{
 +      printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
 +
 +      /* Allocate a major block device number to use with this driver. */
 +      mtip_major = register_blkdev(0, MTIP_DRV_NAME);
 +      if (mtip_major < 0) {
 +              printk(KERN_ERR "Unable to register block device (%d)\n",
 +              mtip_major);
 +              return -EBUSY;
 +      }
 +
 +      /* Register our PCI operations. */
 +      return pci_register_driver(&mtip_pci_driver);
 +}
 +
 +/*
 + * Module de-initialization function.
 + *
 + * Called once when the module is unloaded. This function deallocates
 + * the major block device number allocated by mtip_init() and
 + * unregisters the PCI layer of the driver.
 + *
 + * Return value
 + *      none
 + */
 +static void __exit mtip_exit(void)
 +{
 +      /* Release the allocated major block device number. */
 +      unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 +
 +      /* Unregister the PCI driver. */
 +      pci_unregister_driver(&mtip_pci_driver);
 +}
 +
 +MODULE_AUTHOR("Micron Technology, Inc");
 +MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
 +MODULE_LICENSE("GPL");
 +MODULE_VERSION(MTIP_DRV_VERSION);
 +
 +module_init(mtip_init);
 +module_exit(mtip_exit);
Simple merge
Simple merge
Simple merge
Simple merge
index f62fde21e9626edc19e2374a1fb8850b922e9410,2f138b5ded363305c17433d5ccae647dbe10996a..7f0739bab8d0aa1a1d2b94b6afa830989eca0836
  
  #include <linux/kernel.h>
  #include <linux/cpuidle.h>
 -#include <linux/pm_qos_params.h>
 +#include <linux/pm_qos.h>
  #include <linux/moduleparam.h>
  #include <linux/jiffies.h>
+ #include <linux/module.h>
  
  #include <asm/io.h>
  #include <asm/uaccess.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index b588f8a41e601dfae772ee53570eec068bf5e78c,73a177b92e84d417e220067429749982552d5ff9..0f189b34bda3bcb2c726f78fdfc086414f5fd61c
  #include <linux/basic_mmio_gpio.h>
  #include <linux/of.h>
  #include <linux/of_device.h>
+ #include <linux/module.h>
  #include <asm-generic/bug.h>
  
 +#define irq_to_gpio(irq)      ((irq) - MXC_GPIO_IRQ_START)
 +
  enum mxc_gpio_hwtype {
        IMX1_GPIO,      /* runs on i.mx1 */
        IMX21_GPIO,     /* runs on i.mx21 and i.mx27 */
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc drivers/md/md.c
Simple merge
Simple merge
index cdd71d67def94ddf1feb485c331fbc1d312ab2d8,0000000000000000000000000000000000000000..e7071f66dc39d18b0b241a7a5ee44b244991d6b7
mode 100644,000000..100644
--- /dev/null
@@@ -1,571 -1,0 +1,571 @@@
- #include <linux/module.h>
 +/*
 + * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 + *
 + * This file is released under the GPL.
 + */
 +
 +#include "dm-btree.h"
 +#include "dm-btree-internal.h"
 +#include "dm-transaction-manager.h"
 +
++#include <linux/export.h>
 +
 +/*
 + * Removing an entry from a btree
 + * ==============================
 + *
 + * A very important constraint for our btree is that no node, except the
 + * root, may have fewer than a certain number of entries.
 + * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 + *
 + * Ensuring this is complicated by the way we want to only ever hold the
 + * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 + * fashion.
 + *
 + * Each node may have a left or right sibling.  When decending the spine,
 + * if a node contains only MIN_ENTRIES then we try and increase this to at
 + * least MIN_ENTRIES + 1.  We do this in the following ways:
 + *
 + * [A] No siblings => this can only happen if the node is the root, in which
 + *     case we copy the childs contents over the root.
 + *
 + * [B] No left sibling
 + *     ==> rebalance(node, right sibling)
 + *
 + * [C] No right sibling
 + *     ==> rebalance(left sibling, node)
 + *
 + * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 + *     ==> delete node adding it's contents to left and right
 + *
 + * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 + *     ==> rebalance(left, node, right)
 + *
 + * After these operations it's possible that the our original node no
 + * longer contains the desired sub tree.  For this reason this rebalancing
 + * is performed on the children of the current node.  This also avoids
 + * having a special case for the root.
 + *
 + * Once this rebalancing has occurred we can then step into the child node
 + * for internal nodes.  Or delete the entry for leaf nodes.
 + */
 +
 +/*
 + * Some little utilities for moving node data around.
 + */
 +static void node_shift(struct node *n, int shift)
 +{
 +      uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
 +
 +      if (shift < 0) {
 +              shift = -shift;
 +              memmove(key_ptr(n, 0),
 +                      key_ptr(n, shift),
 +                      (nr_entries - shift) * sizeof(__le64));
 +              memmove(value_ptr(n, 0, sizeof(__le64)),
 +                      value_ptr(n, shift, sizeof(__le64)),
 +                      (nr_entries - shift) * sizeof(__le64));
 +      } else {
 +              memmove(key_ptr(n, shift),
 +                      key_ptr(n, 0),
 +                      nr_entries * sizeof(__le64));
 +              memmove(value_ptr(n, shift, sizeof(__le64)),
 +                      value_ptr(n, 0, sizeof(__le64)),
 +                      nr_entries * sizeof(__le64));
 +      }
 +}
 +
 +static void node_copy(struct node *left, struct node *right, int shift)
 +{
 +      uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 +
 +      if (shift < 0) {
 +              shift = -shift;
 +              memcpy(key_ptr(left, nr_left),
 +                     key_ptr(right, 0),
 +                     shift * sizeof(__le64));
 +              memcpy(value_ptr(left, nr_left, sizeof(__le64)),
 +                     value_ptr(right, 0, sizeof(__le64)),
 +                     shift * sizeof(__le64));
 +      } else {
 +              memcpy(key_ptr(right, 0),
 +                     key_ptr(left, nr_left - shift),
 +                     shift * sizeof(__le64));
 +              memcpy(value_ptr(right, 0, sizeof(__le64)),
 +                     value_ptr(left, nr_left - shift, sizeof(__le64)),
 +                     shift * sizeof(__le64));
 +      }
 +}
 +
 +/*
 + * Delete a specific entry from a leaf node.
 + */
 +static void delete_at(struct node *n, unsigned index, size_t value_size)
 +{
 +      unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
 +      unsigned nr_to_copy = nr_entries - (index + 1);
 +
 +      if (nr_to_copy) {
 +              memmove(key_ptr(n, index),
 +                      key_ptr(n, index + 1),
 +                      nr_to_copy * sizeof(__le64));
 +
 +              memmove(value_ptr(n, index, value_size),
 +                      value_ptr(n, index + 1, value_size),
 +                      nr_to_copy * value_size);
 +      }
 +
 +      n->header.nr_entries = cpu_to_le32(nr_entries - 1);
 +}
 +
 +static unsigned del_threshold(struct node *n)
 +{
 +      return le32_to_cpu(n->header.max_entries) / 3;
 +}
 +
 +static unsigned merge_threshold(struct node *n)
 +{
 +      /*
 +       * The extra one is because we know we're potentially going to
 +       * delete an entry.
 +       */
 +      return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
 +}
 +
 +struct child {
 +      unsigned index;
 +      struct dm_block *block;
 +      struct node *n;
 +};
 +
 +static struct dm_btree_value_type le64_type = {
 +      .context = NULL,
 +      .size = sizeof(__le64),
 +      .inc = NULL,
 +      .dec = NULL,
 +      .equal = NULL
 +};
 +
 +static int init_child(struct dm_btree_info *info, struct node *parent,
 +                    unsigned index, struct child *result)
 +{
 +      int r, inc;
 +      dm_block_t root;
 +
 +      result->index = index;
 +      root = value64(parent, index);
 +
 +      r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
 +                             &result->block, &inc);
 +      if (r)
 +              return r;
 +
 +      result->n = dm_block_data(result->block);
 +
 +      if (inc)
 +              inc_children(info->tm, result->n, &le64_type);
 +
 +      return 0;
 +}
 +
 +static int exit_child(struct dm_btree_info *info, struct child *c)
 +{
 +      return dm_tm_unlock(info->tm, c->block);
 +}
 +
 +static void shift(struct node *left, struct node *right, int count)
 +{
 +      if (!count)
 +              return;
 +
 +      if (count > 0) {
 +              node_shift(right, count);
 +              node_copy(left, right, count);
 +      } else {
 +              node_copy(left, right, count);
 +              node_shift(right, count);
 +      }
 +
 +      left->header.nr_entries =
 +              cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
 +
 +      right->header.nr_entries =
 +              cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
 +}
 +
 +static void __rebalance2(struct dm_btree_info *info, struct node *parent,
 +                       struct child *l, struct child *r)
 +{
 +      struct node *left = l->n;
 +      struct node *right = r->n;
 +      uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 +      uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
 +
 +      if (nr_left + nr_right <= merge_threshold(left)) {
 +              /*
 +               * Merge
 +               */
 +              node_copy(left, right, -nr_right);
 +              left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
 +
 +              *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
 +                      cpu_to_le64(dm_block_location(l->block));
 +              delete_at(parent, r->index, sizeof(__le64));
 +
 +              /*
 +               * We need to decrement the right block, but not it's
 +               * children, since they're still referenced by left.
 +               */
 +              dm_tm_dec(info->tm, dm_block_location(r->block));
 +      } else {
 +              /*
 +               * Rebalance.
 +               */
 +              unsigned target_left = (nr_left + nr_right) / 2;
 +
 +              shift(left, right, nr_left - target_left);
 +              *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
 +                      cpu_to_le64(dm_block_location(l->block));
 +              *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
 +                      cpu_to_le64(dm_block_location(r->block));
 +              *key_ptr(parent, r->index) = right->keys[0];
 +      }
 +}
 +
 +static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 +                    unsigned left_index)
 +{
 +      int r;
 +      struct node *parent;
 +      struct child left, right;
 +
 +      parent = dm_block_data(shadow_current(s));
 +
 +      r = init_child(info, parent, left_index, &left);
 +      if (r)
 +              return r;
 +
 +      r = init_child(info, parent, left_index + 1, &right);
 +      if (r) {
 +              exit_child(info, &left);
 +              return r;
 +      }
 +
 +      __rebalance2(info, parent, &left, &right);
 +
 +      r = exit_child(info, &left);
 +      if (r) {
 +              exit_child(info, &right);
 +              return r;
 +      }
 +
 +      r = exit_child(info, &right);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +static void __rebalance3(struct dm_btree_info *info, struct node *parent,
 +                       struct child *l, struct child *c, struct child *r)
 +{
 +      struct node *left = l->n;
 +      struct node *center = c->n;
 +      struct node *right = r->n;
 +
 +      uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 +      uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
 +      uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
 +      uint32_t max_entries = le32_to_cpu(left->header.max_entries);
 +
 +      unsigned target;
 +
 +      if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
 +              /*
 +               * Delete center node:
 +               *
 +               * We dump as many entries from center as possible into
 +               * left, then the rest in right, then rebalance2.  This
 +               * wastes some cpu, but I want something simple atm.
 +               */
 +              unsigned shift = min(max_entries - nr_left, nr_center);
 +
 +              node_copy(left, center, -shift);
 +              left->header.nr_entries = cpu_to_le32(nr_left + shift);
 +
 +              if (shift != nr_center) {
 +                      shift = nr_center - shift;
 +                      node_shift(right, shift);
 +                      node_copy(center, right, shift);
 +                      right->header.nr_entries = cpu_to_le32(nr_right + shift);
 +              }
 +
 +              *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
 +                      cpu_to_le64(dm_block_location(l->block));
 +              *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
 +                      cpu_to_le64(dm_block_location(r->block));
 +              *key_ptr(parent, r->index) = right->keys[0];
 +
 +              delete_at(parent, c->index, sizeof(__le64));
 +              r->index--;
 +
 +              dm_tm_dec(info->tm, dm_block_location(c->block));
 +              __rebalance2(info, parent, l, r);
 +
 +              return;
 +      }
 +
 +      /*
 +       * Rebalance
 +       */
 +      target = (nr_left + nr_center + nr_right) / 3;
 +      BUG_ON(target == nr_center);
 +
 +      /*
 +       * Adjust the left node
 +       */
 +      shift(left, center, nr_left - target);
 +
 +      /*
 +       * Adjust the right node
 +       */
 +      shift(center, right, target - nr_right);
 +
 +      *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
 +              cpu_to_le64(dm_block_location(l->block));
 +      *((__le64 *) value_ptr(parent, c->index, sizeof(__le64))) =
 +              cpu_to_le64(dm_block_location(c->block));
 +      *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
 +              cpu_to_le64(dm_block_location(r->block));
 +
 +      *key_ptr(parent, c->index) = center->keys[0];
 +      *key_ptr(parent, r->index) = right->keys[0];
 +}
 +
 +static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
 +                    unsigned left_index)
 +{
 +      int r;
 +      struct node *parent = dm_block_data(shadow_current(s));
 +      struct child left, center, right;
 +
 +      /*
 +       * FIXME: fill out an array?
 +       */
 +      r = init_child(info, parent, left_index, &left);
 +      if (r)
 +              return r;
 +
 +      r = init_child(info, parent, left_index + 1, &center);
 +      if (r) {
 +              exit_child(info, &left);
 +              return r;
 +      }
 +
 +      r = init_child(info, parent, left_index + 2, &right);
 +      if (r) {
 +              exit_child(info, &left);
 +              exit_child(info, &center);
 +              return r;
 +      }
 +
 +      __rebalance3(info, parent, &left, &center, &right);
 +
 +      r = exit_child(info, &left);
 +      if (r) {
 +              exit_child(info, &center);
 +              exit_child(info, &right);
 +              return r;
 +      }
 +
 +      r = exit_child(info, &center);
 +      if (r) {
 +              exit_child(info, &right);
 +              return r;
 +      }
 +
 +      r = exit_child(info, &right);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +static int get_nr_entries(struct dm_transaction_manager *tm,
 +                        dm_block_t b, uint32_t *result)
 +{
 +      int r;
 +      struct dm_block *block;
 +      struct node *n;
 +
 +      r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
 +      if (r)
 +              return r;
 +
 +      n = dm_block_data(block);
 +      *result = le32_to_cpu(n->header.nr_entries);
 +
 +      return dm_tm_unlock(tm, block);
 +}
 +
 +static int rebalance_children(struct shadow_spine *s,
 +                            struct dm_btree_info *info, uint64_t key)
 +{
 +      int i, r, has_left_sibling, has_right_sibling;
 +      uint32_t child_entries;
 +      struct node *n;
 +
 +      n = dm_block_data(shadow_current(s));
 +
 +      if (le32_to_cpu(n->header.nr_entries) == 1) {
 +              struct dm_block *child;
 +              dm_block_t b = value64(n, 0);
 +
 +              r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
 +              if (r)
 +                      return r;
 +
 +              memcpy(n, dm_block_data(child),
 +                     dm_bm_block_size(dm_tm_get_bm(info->tm)));
 +              r = dm_tm_unlock(info->tm, child);
 +              dm_tm_dec(info->tm, dm_block_location(child));
 +
 +              return r;
 +      }
 +
 +      i = lower_bound(n, key);
 +      if (i < 0)
 +              return -ENODATA;
 +
 +      r = get_nr_entries(info->tm, value64(n, i), &child_entries);
 +      if (r)
 +              return r;
 +
 +      if (child_entries > del_threshold(n))
 +              return 0;
 +
 +      has_left_sibling = i > 0 ? 1 : 0;
 +      has_right_sibling =
 +              (i >= (le32_to_cpu(n->header.nr_entries) - 1)) ? 0 : 1;
 +
 +      if (!has_left_sibling)
 +              r = rebalance2(s, info, i);
 +
 +      else if (!has_right_sibling)
 +              r = rebalance2(s, info, i - 1);
 +
 +      else
 +              r = rebalance3(s, info, i - 1);
 +
 +      return r;
 +}
 +
 +static int do_leaf(struct node *n, uint64_t key, unsigned *index)
 +{
 +      int i = lower_bound(n, key);
 +
 +      if ((i < 0) ||
 +          (i >= le32_to_cpu(n->header.nr_entries)) ||
 +          (le64_to_cpu(n->keys[i]) != key))
 +              return -ENODATA;
 +
 +      *index = i;
 +
 +      return 0;
 +}
 +
 +/*
 + * Prepares for removal from one level of the hierarchy.  The caller must
 + * actually call delete_at() to remove the entry at index.
 + */
 +static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
 +                    struct dm_btree_value_type *vt, dm_block_t root,
 +                    uint64_t key, unsigned *index)
 +{
 +      int i = *index, inc, r;
 +      struct node *n;
 +
 +      for (;;) {
 +              r = shadow_step(s, root, vt, &inc);
 +              if (r < 0)
 +                      break;
 +
 +              /*
 +               * We have to patch up the parent node, ugly, but I don't
 +               * see a way to do this automatically as part of the spine
 +               * op.
 +               */
 +              if (shadow_has_parent(s)) {
 +                      __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
 +                      memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
 +                             &location, sizeof(__le64));
 +              }
 +
 +              n = dm_block_data(shadow_current(s));
 +              if (inc)
 +                      inc_children(info->tm, n, vt);
 +
 +              if (le32_to_cpu(n->header.flags) & LEAF_NODE)
 +                      return do_leaf(n, key, index);
 +
 +              r = rebalance_children(s, info, key);
 +              if (r)
 +                      break;
 +
 +              n = dm_block_data(shadow_current(s));
 +              if (le32_to_cpu(n->header.flags) & LEAF_NODE)
 +                      return do_leaf(n, key, index);
 +
 +              i = lower_bound(n, key);
 +
 +              /*
 +               * We know the key is present, or else
 +               * rebalance_children would have returned
 +               * -ENODATA
 +               */
 +              root = value64(n, i);
 +      }
 +
 +      return r;
 +}
 +
 +int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 +                  uint64_t *keys, dm_block_t *new_root)
 +{
 +      unsigned level, last_level = info->levels - 1;
 +      int index = 0, r = 0;
 +      struct shadow_spine spine;
 +      struct node *n;
 +
 +      init_shadow_spine(&spine, info);
 +      for (level = 0; level < info->levels; level++) {
 +              r = remove_raw(&spine, info,
 +                             (level == last_level ?
 +                              &info->value_type : &le64_type),
 +                             root, keys[level], (unsigned *)&index);
 +              if (r < 0)
 +                      break;
 +
 +              n = dm_block_data(shadow_current(&spine));
 +              if (level != last_level) {
 +                      root = value64(n, index);
 +                      continue;
 +              }
 +
 +              BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
 +
 +              if (info->value_type.dec)
 +                      info->value_type.dec(info->value_type.context,
 +                                           value_ptr(n, index, info->value_type.size));
 +
 +              delete_at(n, index, info->value_type.size);
 +
 +              r = 0;
 +              *new_root = shadow_root(&spine);
 +      }
 +
 +      exit_shadow_spine(&spine);
 +
 +      return r;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_remove);
index ca16d5b4799a3704af61403de304b3899b37b3c5,0000000000000000000000000000000000000000..408b762532a777659905ca4870dc58b131b48396
mode 100644,000000..100644
--- /dev/null
@@@ -1,861 -1,0 +1,861 @@@
- #include <linux/module.h>
 +/*
 + * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 + *
 + * This file is released under the GPL.
 + */
 +
 +#include "dm-btree-internal.h"
 +#include "dm-space-map.h"
 +#include "dm-transaction-manager.h"
 +
++#include <linux/export.h>
 +#include <linux/device-mapper.h>
 +
 +#define DM_MSG_PREFIX "btree"
 +
 +/*----------------------------------------------------------------
 + * Array manipulation
 + *--------------------------------------------------------------*/
 +/*
 + * memcpy wrapper that also drops the "blessed for disk" annotation from
 + * @src once it has been copied into place (sparse bookkeeping only; no
 + * runtime effect beyond the copy).
 + */
 +static void memcpy_disk(void *dest, const void *src, size_t len)
 +      __dm_written_to_disk(src)
 +{
 +      memcpy(dest, src, len);
 +      __dm_unbless_for_disk(src);
 +}
 +
 +static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
 +                       unsigned index, void *elt)
 +      __dm_written_to_disk(elt)
 +{
 +      if (index < nr_elts)
 +              memmove(base + (elt_size * (index + 1)),
 +                      base + (elt_size * index),
 +                      (nr_elts - index) * elt_size);
 +
 +      memcpy_disk(base + (elt_size * index), elt, elt_size);
 +}
 +
 +/*----------------------------------------------------------------*/
 +
 +/* makes the assumption that no two keys are the same. */
 +static int bsearch(struct node *n, uint64_t key, int want_hi)
 +{
 +      int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
 +
 +      while (hi - lo > 1) {
 +              int mid = lo + ((hi - lo) / 2);
 +              uint64_t mid_key = le64_to_cpu(n->keys[mid]);
 +
 +              if (mid_key == key)
 +                      return mid;
 +
 +              if (mid_key < key)
 +                      lo = mid;
 +              else
 +                      hi = mid;
 +      }
 +
 +      return want_hi ? hi : lo;
 +}
 +
 +/*
 + * Returns the index of the largest key in @n that is <= @key,
 + * or -1 if every key is greater than @key.
 + */
 +int lower_bound(struct node *n, uint64_t key)
 +{
 +      return bsearch(n, key, 0);
 +}
 +
 +/*
 + * Bump the reference counts of everything @n points at: child blocks for
 + * an internal node, or the values themselves (via the value type's inc
 + * callback, if one is provided) for a leaf.
 + */
 +void inc_children(struct dm_transaction_manager *tm, struct node *n,
 +                struct dm_btree_value_type *vt)
 +{
 +      unsigned i;
 +      uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
 +
 +      if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
 +              for (i = 0; i < nr_entries; i++)
 +                      dm_tm_inc(tm, value64(n, i));
 +      else if (vt->inc)
 +              for (i = 0; i < nr_entries; i++)
 +                      vt->inc(vt->context,
 +                              value_ptr(n, i, vt->size));
 +}
 +
 +/*
 + * Insert @key/@value at @index in @node, shifting later entries up.
 + * @value must arrive blessed for disk; on the error path it is unblessed
 + * here before returning, on success array_insert()/memcpy_disk() unbless
 + * it.  Returns 0 or -ENOMEM if the node is already full.
 + */
 +static int insert_at(size_t value_size, struct node *node, unsigned index,
 +                    uint64_t key, void *value)
 +                    __dm_written_to_disk(value)
 +{
 +      uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
 +      __le64 key_le = cpu_to_le64(key);
 +
 +      if (index > nr_entries ||
 +          index >= le32_to_cpu(node->header.max_entries)) {
 +              DMERR("too many entries in btree node for insert");
 +              __dm_unbless_for_disk(value);
 +              return -ENOMEM;
 +      }
 +
 +      __dm_bless_for_disk(&key_le);
 +
 +      /* Keys and values live in separate parallel arrays. */
 +      array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
 +      array_insert(value_base(node), value_size, nr_entries, index, value);
 +      node->header.nr_entries = cpu_to_le32(nr_entries + 1);
 +
 +      return 0;
 +}
 +
 +/*----------------------------------------------------------------*/
 +
 +/*
 + * We want 3n entries (for some n).  This works more nicely for repeated
 + * insert remove loops than (2n + 1).
 + */
 +static uint32_t calc_max_entries(size_t value_size, size_t block_size)
 +{
 +      uint32_t total, n;
 +      size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
 +
 +      block_size -= sizeof(struct node_header);
 +      total = block_size / elt_size;
 +      n = total / 3;          /* rounds down */
 +
 +      return 3 * n;
 +}
 +
 +/*
 + * Create an empty btree: allocate one block, format it as a leaf with
 + * zero entries, and return its location in *root.  Returns 0 or a
 + * negative errno from block allocation / unlock.
 + */
 +int dm_btree_create(struct dm_btree_info *info, dm_block_t *root)
 +{
 +      int r;
 +      struct dm_block *b;
 +      struct node *n;
 +      size_t block_size;
 +      uint32_t max_entries;
 +
 +      r = new_block(info, &b);
 +      if (r < 0)
 +              return r;
 +
 +      block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
 +      max_entries = calc_max_entries(info->value_type.size, block_size);
 +
 +      /* Zero the whole block, then fill in the leaf header. */
 +      n = dm_block_data(b);
 +      memset(n, 0, block_size);
 +      n->header.flags = cpu_to_le32(LEAF_NODE);
 +      n->header.nr_entries = cpu_to_le32(0);
 +      n->header.max_entries = cpu_to_le32(max_entries);
 +      n->header.value_size = cpu_to_le32(info->value_type.size);
 +
 +      *root = dm_block_location(b);
 +
 +      return unlock_block(info, b);
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_create);
 +
 +/*----------------------------------------------------------------*/
 +
 +/*
 + * Deletion uses a recursive algorithm, since we have limited stack space
 + * we explicitly manage our own stack on the heap.
 + */
 +#define MAX_SPINE_DEPTH 64    /* max explicit-stack depth for deletion */
 +struct frame {
 +      struct dm_block *b;     /* read-locked block for this frame */
 +      struct node *n;         /* cached dm_block_data(b) */
 +      unsigned level;         /* btree level this node belongs to */
 +      unsigned nr_children;   /* entry count captured when pushed */
 +      unsigned current_child; /* next child index to visit */
 +};
 +
 +/* Heap-allocated stack used by dm_btree_destroy(). */
 +struct del_stack {
 +      struct dm_transaction_manager *tm;
 +      int top;                /* index of top frame, -1 when empty */
 +      struct frame spine[MAX_SPINE_DEPTH];
 +};
 +
 +/*
 + * Point *f at the top frame of the stack; -EINVAL if the stack is empty.
 + */
 +static int top_frame(struct del_stack *s, struct frame **f)
 +{
 +      if (s->top < 0) {
 +              DMERR("btree deletion stack empty");
 +              return -EINVAL;
 +      }
 +
 +      *f = s->spine + s->top;
 +
 +      return 0;
 +}
 +
 +static int unprocessed_frames(struct del_stack *s)
 +{
 +      return s->top >= 0;
 +}
 +
 +/*
 + * Push block @b onto the deletion stack at @level.  If the block is
 + * shared (ref count > 1) we only drop our reference and do NOT recurse
 + * into its children - they are still reachable from the other owner.
 + * Returns 0, -ENOMEM if the stack is full, or a tm error.
 + */
 +static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 +{
 +      int r;
 +      uint32_t ref_count;
 +
 +      if (s->top >= MAX_SPINE_DEPTH - 1) {
 +              DMERR("btree deletion stack out of memory");
 +              return -ENOMEM;
 +      }
 +
 +      r = dm_tm_ref(s->tm, b, &ref_count);
 +      if (r)
 +              return r;
 +
 +      if (ref_count > 1)
 +              /*
 +               * This is a shared node, so we can just decrement its
 +               * reference counter and leave the children.
 +               */
 +              dm_tm_dec(s->tm, b);
 +
 +      else {
 +              struct frame *f = s->spine + ++s->top;
 +
 +              r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
 +              if (r) {
 +                      /* undo the push on lock failure */
 +                      s->top--;
 +                      return r;
 +              }
 +
 +              f->n = dm_block_data(f->b);
 +              f->level = level;
 +              f->nr_children = le32_to_cpu(f->n->header.nr_entries);
 +              f->current_child = 0;
 +      }
 +
 +      return 0;
 +}
 +
 +/*
 + * Pop the top frame: drop our reference to the (fully processed) block
 + * and release its read lock.
 + */
 +static void pop_frame(struct del_stack *s)
 +{
 +      struct frame *f = s->spine + s->top--;
 +
 +      dm_tm_dec(s->tm, dm_block_location(f->b));
 +      dm_tm_unlock(s->tm, f->b);
 +}
 +
 +/*
 + * Tear down the whole (possibly multi-level) btree at @root, decrementing
 + * block and value reference counts as it goes.  Uses an explicit
 + * heap-allocated stack instead of recursion to bound kernel stack use.
 + */
 +int dm_btree_destroy(struct dm_btree_info *info, dm_block_t root)
 +{
 +      int r;
 +      struct del_stack *s;
 +
 +      s = kmalloc(sizeof(*s), GFP_KERNEL);
 +      if (!s)
 +              return -ENOMEM;
 +
 +      s->tm = info->tm;
 +      s->top = -1;
 +
 +      /*
 +       * NOTE(review): the root is pushed with level 1, but leaves are
 +       * detected by comparing f->level against info->levels - 1 below.
 +       * For a single-level tree (levels == 1) those never match - verify
 +       * the intended level numbering convention.
 +       */
 +      r = push_frame(s, root, 1);
 +      if (r)
 +              goto out;
 +
 +      while (unprocessed_frames(s)) {
 +              uint32_t flags;
 +              struct frame *f;
 +              dm_block_t b;
 +
 +              r = top_frame(s, &f);
 +              if (r)
 +                      goto out;
 +
 +              /* All children visited: release this node. */
 +              if (f->current_child >= f->nr_children) {
 +                      pop_frame(s);
 +                      continue;
 +              }
 +
 +              flags = le32_to_cpu(f->n->header.flags);
 +              if (flags & INTERNAL_NODE) {
 +                      /* descend into next child of this internal node */
 +                      b = value64(f->n, f->current_child);
 +                      f->current_child++;
 +                      r = push_frame(s, b, f->level);
 +                      if (r)
 +                              goto out;
 +
 +              } else if (f->level != (info->levels - 1)) {
 +                      /* leaf of an upper level: values are sub-tree roots */
 +                      b = value64(f->n, f->current_child);
 +                      f->current_child++;
 +                      r = push_frame(s, b, f->level + 1);
 +                      if (r)
 +                              goto out;
 +
 +              } else {
 +                      /* bottom-level leaf: dec every stored value at once */
 +                      if (info->value_type.dec) {
 +                              unsigned i;
 +
 +                              for (i = 0; i < f->nr_children; i++)
 +                                      info->value_type.dec(info->value_type.context,
 +                                                           value_ptr(f->n, i, info->value_type.size));
 +                      }
 +                      f->current_child = f->nr_children;
 +              }
 +      }
 +
 +out:
 +      kfree(s);
 +      return r;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_destroy);
 +
 +// FIXME Implement or remove this fn before final submission.
 +/*
 + * Stub: intended to delete all entries with keys greater than *key.
 + * Currently a no-op that always reports success - callers must not rely
 + * on it until it is implemented (or it should be removed).
 + */
 +int dm_btree_delete_gt(struct dm_btree_info *info, dm_block_t root, uint64_t *key,
 +                  dm_block_t *new_root)
 +{
 +      /* FIXME: implement */
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_delete_gt);
 +
 +/*----------------------------------------------------------------*/
 +
 +/*
 + * Walk one btree level from @block down to a leaf, choosing the child at
 + * each internal node with @search_fn.  On success copies the found key
 + * into *result_key and value_size bytes of its value into @v.
 + * Returns 0, -ENODATA if the search falls off either end of a node, or a
 + * spine error.
 + */
 +static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
 +                          int (*search_fn)(struct node *, uint64_t),
 +                          uint64_t *result_key, void *v, size_t value_size)
 +{
 +      int i, r;
 +      uint32_t flags, nr_entries;
 +
 +      do {
 +              r = ro_step(s, block);
 +              if (r < 0)
 +                      return r;
 +
 +              i = search_fn(ro_node(s), key);
 +
 +              flags = le32_to_cpu(ro_node(s)->header.flags);
 +              nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
 +              if (i < 0 || i >= nr_entries)
 +                      return -ENODATA;
 +
 +              if (flags & INTERNAL_NODE)
 +                      block = value64(ro_node(s), i);
 +
 +      } while (!(flags & LEAF_NODE));
 +
 +      /* i indexes the leaf entry found in the final iteration. */
 +      *result_key = le64_to_cpu(ro_node(s)->keys[i]);
 +      memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
 +
 +      return 0;
 +}
 +
 +/*
 + * Look up the value addressed by @keys (one key per level).  The value's
 + * on-disk representation is copied into @value_le.  Returns 0 on success,
 + * -ENODATA if any level lacks an exact key match, or a lookup error.
 + */
 +int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
 +                  uint64_t *keys, void *value_le)
 +{
 +      unsigned level, last_level = info->levels - 1;
 +      int r = -ENODATA;
 +      uint64_t rkey;
 +      __le64 internal_value_le;
 +      struct ro_spine spine;
 +
 +      init_ro_spine(&spine, info);
 +      for (level = 0; level < info->levels; level++) {
 +              size_t size;
 +              void *value_p;
 +
 +              /* Last level yields the caller's value; upper levels yield
 +               * a __le64 root of the next level's subtree. */
 +              if (level == last_level) {
 +                      value_p = value_le;
 +                      size = info->value_type.size;
 +
 +              } else {
 +                      value_p = &internal_value_le;
 +                      size = sizeof(uint64_t);
 +              }
 +
 +              r = btree_lookup_raw(&spine, root, keys[level],
 +                                   lower_bound, &rkey,
 +                                   value_p, size);
 +
 +              if (!r) {
 +                      /* lower_bound may land on a smaller key; require
 +                       * an exact match. */
 +                      if (rkey != keys[level]) {
 +                              exit_ro_spine(&spine);
 +                              return -ENODATA;
 +                      }
 +              } else {
 +                      exit_ro_spine(&spine);
 +                      return r;
 +              }
 +
 +              root = le64_to_cpu(internal_value_le);
 +      }
 +      exit_ro_spine(&spine);
 +
 +      return r;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_lookup);
 +
 +/*
 + * Splits a node by creating a sibling node and shifting half the nodes
 + * contents across.  Assumes there is a parent node, and it has room for
 + * another child.
 + *
 + * Before:
 + *      +--------+
 + *      | Parent |
 + *      +--------+
 + *         |
 + *         v
 + *    +----------+
 + *    | A ++++++ |
 + *    +----------+
 + *
 + *
 + * After:
 + *            +--------+
 + *            | Parent |
 + *            +--------+
 + *              |     |
 + *              v     +------+
 + *        +---------+        |
 + *        | A* +++  |        v
 + *        +---------+   +-------+
 + *                      | B +++ |
 + *                      +-------+
 + *
 + * Where A* is a shadow of A.
 + */
 +static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
 +                             unsigned parent_index, uint64_t key)
 +{
 +      int r;
 +      size_t size;
 +      unsigned nr_left, nr_right;
 +      struct dm_block *left, *right, *parent;
 +      struct node *ln, *rn, *pn;
 +      __le64 location;
 +
 +      left = shadow_current(s);
 +
 +      r = new_block(s->info, &right);
 +      if (r < 0)
 +              return r;
 +
 +      ln = dm_block_data(left);
 +      rn = dm_block_data(right);
 +
 +      nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
 +      nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
 +
 +      ln->header.nr_entries = cpu_to_le32(nr_left);
 +
 +      rn->header.flags = ln->header.flags;
 +      rn->header.nr_entries = cpu_to_le32(nr_right);
 +      rn->header.max_entries = ln->header.max_entries;
 +      rn->header.value_size = ln->header.value_size;
 +      memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
 +
 +      size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
 +              sizeof(uint64_t) : s->info->value_type.size;
 +      memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
 +             size * nr_right);
 +
 +      /*
 +       * Patch up the parent
 +       */
 +      parent = shadow_parent(s);
 +
 +      pn = dm_block_data(parent);
 +      location = cpu_to_le64(dm_block_location(left));
 +      __dm_bless_for_disk(&location);
 +      memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
 +                  &location, sizeof(__le64));
 +
 +      location = cpu_to_le64(dm_block_location(right));
 +      __dm_bless_for_disk(&location);
 +
 +      r = insert_at(sizeof(__le64), pn, parent_index + 1,
 +                    le64_to_cpu(rn->keys[0]), &location);
 +      if (r)
 +              return r;
 +
 +      if (key < le64_to_cpu(rn->keys[0])) {
 +              unlock_block(s->info, right);
 +              s->nodes[1] = left;
 +      } else {
 +              unlock_block(s->info, left);
 +              s->nodes[1] = right;
 +      }
 +
 +      return 0;
 +}
 +
 +/*
 + * Splits a node by creating two new children beneath the given node.
 + *
 + * Before:
 + *      +----------+
 + *      | A ++++++ |
 + *      +----------+
 + *
 + *
 + * After:
 + *    +------------+
 + *    | A (shadow) |
 + *    +------------+
 + *        |   |
 + *   +------+ +----+
 + *   |                     |
 + *   v                     v
 + * +-------+   +-------+
 + * | B +++ |   | C +++ |
 + * +-------+   +-------+
 + */
 +static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 +{
 +      int r;
 +      size_t size;
 +      unsigned nr_left, nr_right;
 +      struct dm_block *left, *right, *new_parent;
 +      struct node *pn, *ln, *rn;
 +      __le64 val;
 +
 +      new_parent = shadow_current(s);
 +
 +      r = new_block(s->info, &left);
 +      if (r < 0)
 +              return r;
 +
 +      r = new_block(s->info, &right);
 +      if (r < 0) {
 +              /* FIXME: put left */
 +              return r;
 +      }
 +
 +      pn = dm_block_data(new_parent);
 +      ln = dm_block_data(left);
 +      rn = dm_block_data(right);
 +
 +      nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
 +      nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 +
 +      ln->header.flags = pn->header.flags;
 +      ln->header.nr_entries = cpu_to_le32(nr_left);
 +      ln->header.max_entries = pn->header.max_entries;
 +      ln->header.value_size = pn->header.value_size;
 +
 +      rn->header.flags = pn->header.flags;
 +      rn->header.nr_entries = cpu_to_le32(nr_right);
 +      rn->header.max_entries = pn->header.max_entries;
 +      rn->header.value_size = pn->header.value_size;
 +
 +      memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
 +      memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
 +
 +      size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
 +              sizeof(__le64) : s->info->value_type.size;
 +      memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
 +      memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
 +             nr_right * size);
 +
 +      /* new_parent should just point to l and r now */
 +      pn->header.flags = cpu_to_le32(INTERNAL_NODE);
 +      pn->header.nr_entries = cpu_to_le32(2);
 +      pn->header.max_entries = cpu_to_le32(
 +              calc_max_entries(sizeof(__le64),
 +                               dm_bm_block_size(
 +                                       dm_tm_get_bm(s->info->tm))));
 +      pn->header.value_size = cpu_to_le32(sizeof(__le64));
 +
 +      val = cpu_to_le64(dm_block_location(left));
 +      __dm_bless_for_disk(&val);
 +      pn->keys[0] = ln->keys[0];
 +      memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
 +
 +      val = cpu_to_le64(dm_block_location(right));
 +      __dm_bless_for_disk(&val);
 +      pn->keys[1] = rn->keys[0];
 +      memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
 +
 +      /*
 +       * rejig the spine.  This is ugly, since it knows too
 +       * much about the spine
 +       */
 +      if (s->nodes[0] != new_parent) {
 +              unlock_block(s->info, s->nodes[0]);
 +              s->nodes[0] = new_parent;
 +      }
 +      if (key < le64_to_cpu(rn->keys[0])) {
 +              unlock_block(s->info, right);
 +              s->nodes[1] = left;
 +      } else {
 +              unlock_block(s->info, left);
 +              s->nodes[1] = right;
 +      }
 +      s->count = 2;
 +
 +      return 0;
 +}
 +
 +/*
 + * Walk one btree level shadowing each node on the way down, splitting any
 + * full node encountered, and return in *index the leaf slot where @key
 + * lives (or should be inserted).  *index must arrive as the parent slot
 + * of @root (or negative at the top of a level).
 + */
 +static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
 +                          struct dm_btree_value_type *vt,
 +                          uint64_t key, unsigned *index)
 +{
 +      int r, i = *index, inc, top = 1;
 +      struct node *node;
 +
 +      for (;;) {
 +              r = shadow_step(s, root, vt, &inc);
 +              if (r < 0)
 +                      return r;
 +
 +              node = dm_block_data(shadow_current(s));
 +              /* first shadow of a shared node: bump children's refcounts */
 +              if (inc)
 +                      inc_children(s->info->tm, node, vt);
 +
 +              /*
 +               * We have to patch up the parent node, ugly, but I don't
 +               * see a way to do this automatically as part of the spine
 +               * op.
 +               */
 +              if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. */
 +                      __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
 +
 +                      __dm_bless_for_disk(&location);
 +                      memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
 +                                  &location, sizeof(__le64));
 +              }
 +
 +              node = dm_block_data(shadow_current(s));
 +
 +              /* raw __le32 equality is fine here - no ordering needed */
 +              if (node->header.nr_entries == node->header.max_entries) {
 +                      if (top)
 +                              r = btree_split_beneath(s, key);
 +                      else
 +                              r = btree_split_sibling(s, root, i, key);
 +
 +                      if (r < 0)
 +                              return r;
 +              }
 +
 +              node = dm_block_data(shadow_current(s));
 +
 +              i = lower_bound(node, key);
 +
 +              if (le32_to_cpu(node->header.flags) & LEAF_NODE)
 +                      break;
 +
 +              if (i < 0) {
 +                      /* change the bounds on the lowest key */
 +                      node->keys[0] = cpu_to_le64(key);
 +                      i = 0;
 +              }
 +
 +              root = value64(node, i);
 +              top = 0;
 +      }
 +
 +      /* No exact match: the new entry goes one slot past lower_bound. */
 +      if (i < 0 || le64_to_cpu(node->keys[i]) != key)
 +              i++;
 +
 +      /* we're about to overwrite this value, so undo the increment for it */
 +      /* FIXME: shame that inc information is leaking outside the spine.
 +       * Plus inc is just plain wrong in the event of a split */
 +      /* NOTE(review): when key is greater than every key in the leaf,
 +       * i == nr_entries here and keys[i] reads an unused slot - confirm
 +       * this comparison can never spuriously match. */
 +      if (le64_to_cpu(node->keys[i]) == key && inc)
 +              if (vt->dec)
 +                      vt->dec(vt->context, value_ptr(node, i, vt->size));
 +
 +      *index = i;
 +      return 0;
 +}
 +
 +/*
 + * Shared implementation of dm_btree_insert{,_notify}().  Walks every
 + * level, creating empty sub-trees for missing interior keys, then inserts
 + * or overwrites the value at the last level.  If @inserted is non-NULL it
 + * is set to 1 for a fresh insert and 0 for an overwrite.  @value must
 + * arrive blessed for disk; it is unblessed on every path.
 + */
 +static int insert(struct dm_btree_info *info, dm_block_t root,
 +                uint64_t *keys, void *value, dm_block_t *new_root,
 +                int *inserted)
 +                __dm_written_to_disk(value)
 +{
 +      int r, need_insert;
 +      unsigned level, index = -1, last_level = info->levels - 1;
 +      dm_block_t block = root;
 +      struct shadow_spine spine;
 +      struct node *n;
 +      struct dm_btree_value_type le64_type;
 +
 +      /* interior levels store plain __le64 block pointers */
 +      le64_type.context = NULL;
 +      le64_type.size = sizeof(__le64);
 +      le64_type.inc = NULL;
 +      le64_type.dec = NULL;
 +      le64_type.equal = NULL;
 +
 +      init_shadow_spine(&spine, info);
 +
 +      for (level = 0; level < (info->levels - 1); level++) {
 +              r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
 +              if (r < 0)
 +                      goto bad;
 +
 +              n = dm_block_data(shadow_current(&spine));
 +              need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
 +                             (le64_to_cpu(n->keys[index]) != keys[level]));
 +
 +              if (need_insert) {
 +                      /* no subtree for this key yet: create an empty one */
 +                      dm_block_t new_tree;
 +                      __le64 new_le;
 +
 +                      r = dm_btree_create(info, &new_tree);
 +                      if (r < 0)
 +                              goto bad;
 +
 +                      new_le = cpu_to_le64(new_tree);
 +                      __dm_bless_for_disk(&new_le);
 +
 +                      r = insert_at(sizeof(uint64_t), n, index,
 +                                    keys[level], &new_le);
 +                      if (r)
 +                              goto bad;
 +              }
 +
 +              if (level < last_level)
 +                      block = value64(n, index);
 +      }
 +
 +      /* last level: the caller's value type applies */
 +      r = btree_insert_raw(&spine, block, &info->value_type,
 +                           keys[level], &index);
 +      if (r < 0)
 +              goto bad;
 +
 +      n = dm_block_data(shadow_current(&spine));
 +      need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
 +                     (le64_to_cpu(n->keys[index]) != keys[level]));
 +
 +      if (need_insert) {
 +              if (inserted)
 +                      *inserted = 1;
 +
 +              r = insert_at(info->value_type.size, n, index,
 +                            keys[level], value);
 +              if (r)
 +                      goto bad_unblessed;
 +      } else {
 +              if (inserted)
 +                      *inserted = 0;
 +
 +              /* overwrite: drop the old value's reference unless it
 +               * compares equal to the new one */
 +              if (info->value_type.dec &&
 +                  (!info->value_type.equal ||
 +                   !info->value_type.equal(
 +                           info->value_type.context,
 +                           value_ptr(n, index, info->value_type.size),
 +                           value))) {
 +                      info->value_type.dec(info->value_type.context,
 +                                           value_ptr(n, index, info->value_type.size));
 +              }
 +              memcpy_disk(value_ptr(n, index, info->value_type.size),
 +                          value, info->value_type.size);
 +      }
 +
 +      *new_root = shadow_root(&spine);
 +      exit_shadow_spine(&spine);
 +
 +      return 0;
 +
 +bad:
 +      /* insert_at unblesses on its own failure; do it here otherwise */
 +      __dm_unbless_for_disk(value);
 +bad_unblessed:
 +      exit_shadow_spine(&spine);
 +      return r;
 +}
 +
 +/*
 + * Insert @value at @keys, overwriting silently if the key already exists.
 + * Thin wrapper around insert() without the inserted/overwritten report.
 + */
 +int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
 +                  uint64_t *keys, void *value, dm_block_t *new_root)
 +                  __dm_written_to_disk(value)
 +{
 +      return insert(info, root, keys, value, new_root, NULL);
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_insert);
 +
 +/*
 + * As dm_btree_insert(), but additionally reports via *inserted whether a
 + * new entry was created (1) or an existing one overwritten (0).
 + */
 +int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
 +                         uint64_t *keys, void *value, dm_block_t *new_root,
 +                         int *inserted)
 +                         __dm_written_to_disk(value)
 +{
 +      return insert(info, root, keys, value, new_root, inserted);
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
 +
 +/*----------------------------------------------------------------*/
 +
 +int dm_btree_clone(struct dm_btree_info *info, dm_block_t root,
 +                 dm_block_t *clone)
 +{
 +      int r;
 +      struct dm_block *b, *orig_b;
 +      struct node *b_node, *orig_node;
 +
 +      /* Copy the root node */
 +      r = new_block(info, &b);
 +      if (r < 0)
 +              return r;
 +
 +      r = dm_tm_read_lock(info->tm, root, &btree_node_validator, &orig_b);
 +      if (r < 0) {
 +              dm_block_t location = dm_block_location(b);
 +
 +              unlock_block(info, b);
 +              dm_tm_dec(info->tm, location);
 +      }
 +
 +      *clone = dm_block_location(b);
 +      b_node = dm_block_data(b);
 +      orig_node = dm_block_data(orig_b);
 +
 +      memcpy(b_node, orig_node,
 +             dm_bm_block_size(dm_tm_get_bm(info->tm)));
 +      dm_tm_unlock(info->tm, orig_b);
 +      inc_children(info->tm, b_node, &info->value_type);
 +      dm_tm_unlock(info->tm, b);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_clone);
 +
 +/*----------------------------------------------------------------*/
 +
 +/*
 + * Walk one btree level always taking the last entry of each node, ending
 + * at the highest key of that level (written to *result_key).  If
 + * @next_block is non-NULL the leaf's last value (the next level's root)
 + * is stored there.  Returns 0 or -ENODATA on an empty node.
 + */
 +static int find_highest_key(struct ro_spine *s, dm_block_t block,
 +                          uint64_t *result_key, dm_block_t *next_block)
 +{
 +      int i, r;
 +      uint32_t flags;
 +
 +      do {
 +              r = ro_step(s, block);
 +              if (r < 0)
 +                      return r;
 +
 +              flags = le32_to_cpu(ro_node(s)->header.flags);
 +              i = le32_to_cpu(ro_node(s)->header.nr_entries);
 +              if (!i)
 +                      return -ENODATA;
 +              else
 +                      i--;
 +
 +              *result_key = le64_to_cpu(ro_node(s)->keys[i]);
 +              /* only read the value if we'll descend or report it */
 +              if (next_block || flags & INTERNAL_NODE)
 +                      block = value64(ro_node(s), i);
 +
 +      } while (flags & INTERNAL_NODE);
 +
 +      if (next_block)
 +              *next_block = block;
 +      return 0;
 +}
 +
 +int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
 +                            uint64_t *result_keys)
 +{
 +      int r = 0, count = 0, level;
 +      struct ro_spine spine;
 +
 +      init_ro_spine(&spine, info);
 +      for (level = 0; level < info->levels; level++) {
 +              r = find_highest_key(&spine, root, result_keys + level,
 +                                   level == info->levels - 1 ? NULL : &root);
 +              if (r == -ENODATA) {
 +                      r = 0;
 +                      break;
 +
 +              } else if (r)
 +                      break;
 +
 +              count++;
 +      }
 +      exit_ro_spine(&spine);
 +
 +      return r ? r : count;
 +}
 +EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
index 6229a4e68f88ca19f65e17623c0bbc3a834c2dee,0000000000000000000000000000000000000000..e6b9d67270eed1eae4ddb21b81b243b7c06345d5
mode 100644,000000..100644
--- /dev/null
@@@ -1,663 -1,0 +1,663 @@@
- #include <linux/module.h>
 +/*
 + * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 + *
 + * This file is released under the GPL.
 + */
 +
 +#include "dm-space-map-common.h"
 +#include "dm-space-map-disk.h"
 +#include "dm-space-map.h"
 +#include "dm-transaction-manager.h"
 +
 +#include <linux/list.h>
 +#include <linux/slab.h>
 +#include <linux/bitops.h>
++#include <linux/export.h>
 +#include <linux/device-mapper.h>
 +
 +#define DM_MSG_PREFIX "space map disk"
 +
 +/*
 + * Bitmap validator
 + */
/*
 * Validator write hook: stamp the block's own location and a checksum
 * into the header just before the block manager writes it out.  The
 * csum covers the block from the not_used field onwards, i.e. it
 * excludes the csum field itself.
 */
static void bitmap_prepare_for_write(struct dm_block_validator *v,
				     struct dm_block *b,
				     size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
}
 +
/*
 * Validator read hook: verify that the block was read from the
 * location recorded in its header and that the checksum matches.
 * Returns 0 on success, -ENOTBLK / -EILSEQ on mismatch.
 */
static int bitmap_check(struct dm_block_validator *v,
			struct dm_block *b,
			size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		/* NOTE(review): %llu with le64_to_cpu() may warn on 64-bit
		 * arches without an (unsigned long long) cast -- confirm. */
		DMERR("bitmap check failed blocknr %llu wanted %llu",
		      le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	/* Recompute over the same range used by bitmap_prepare_for_write(). */
	csum_disk = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
	if (csum_disk != disk_header->csum) {
		DMERR("bitmap check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}
 +
/*
 * Validator applied to every on-disk bitmap block: stamps location and
 * checksum on write, verifies both on read.
 */
struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = bitmap_prepare_for_write,
	.check = bitmap_check
};
 +
 +/*----------------------------------------------------------------*/
 +
 +#define ENTRIES_PER_WORD 32
 +#define ENTRIES_SHIFT 5
 +
/* Return a pointer to the bitmap payload, just past the on-disk header. */
void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}
 +
 +#define WORD_MASK_LOW 0x5555555555555555ULL
 +#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
 +#define WORD_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
 +
/*
 * Fast-path test for sm_find_free(): each entry is a 2-bit pair packed
 * 32 to a 64-bit word.  Returns non-zero when the word can be skipped
 * because every entry is in use.  Note this is conservative -- it only
 * detects "all high bits set", "all low bits set" or "all bits set";
 * a fully-used word with mixed 01/10 entries falls through to the
 * per-entry scan, which is correct, just slower.
 */
static unsigned bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);

	return ((bits & WORD_MASK_LOW) == WORD_MASK_LOW ||
		(bits & WORD_MASK_HIGH) == WORD_MASK_HIGH ||
		(bits & WORD_MASK_ALL) == WORD_MASK_ALL);
}
 +
/*
 * Read entry @b's 2-bit value (0..3).  The first bit of the pair is
 * the high bit of the result, the second bit the low bit.
 */
unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	/* bit offset of the entry's first bit within its word */
	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	return (!!test_bit_le(b, (void *) w_le) << 1) |
		(!!test_bit_le(b + 1, (void *) w_le));
}
 +
/*
 * Store the 2-bit value @val (0..3) for entry @b; inverse of
 * sm_lookup_bitmap().  Bit 1 of @val goes into the pair's first bit,
 * bit 0 into the second.  Uses the non-atomic __set/__clear variants:
 * the caller holds a write lock on the bitmap block.
 */
void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}
 +
 +int sm_find_free(void *addr, unsigned begin, unsigned end,
 +               unsigned *result)
 +{
 +      while (begin < end) {
 +              if (!(begin & (ENTRIES_PER_WORD - 1)) &&
 +                  bitmap_word_used(addr, begin)) {
 +                      begin += ENTRIES_PER_WORD;
 +                      continue;
 +              }
 +
 +              if (!sm_lookup_bitmap(addr, begin)) {
 +                      *result = begin;
 +                      return 0;
 +              }
 +
 +              begin++;
 +      }
 +
 +      return -ENOSPC;
 +}
 +
/*
 * Initialise the in-core fields of @io: the btree infos for the bitmap
 * index tree and the overflow reference-count tree, plus geometry
 * derived from the block manager's block size.  Touches no on-disk
 * state.  Returns -EINVAL if the block size is too large.
 */
static int disk_ll_init(struct ll_disk *io, struct dm_transaction_manager *tm)
{
	io->tm = tm;
	io->bitmap_info.tm = tm;
	io->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	io->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	io->bitmap_info.value_type.inc = NULL;
	io->bitmap_info.value_type.dec = NULL;
	io->bitmap_info.value_type.equal = NULL;

	/* Overflow tree: plain uint32_t counts, no ref-count hooks needed. */
	io->ref_count_info.tm = tm;
	io->ref_count_info.levels = 1;
	io->ref_count_info.value_type.size = sizeof(uint32_t);
	io->ref_count_info.value_type.inc = NULL;
	io->ref_count_info.value_type.dec = NULL;
	io->ref_count_info.value_type.equal = NULL;

	io->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (io->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	/* Payload bytes (block minus header) times entries per byte. */
	io->entries_per_block = (io->block_size - sizeof(struct disk_bitmap_header)) *
				ENTRIES_PER_BYTE;
	io->nr_blocks = 0;
	io->bitmap_root = 0;
	io->ref_count_root = 0;

	return 0;
}
 +
/*
 * Create a brand-new, empty low-level space map: init the in-core
 * state then create the (empty) bitmap index and ref-count btrees.
 * On failure the partially created bitmap tree is destroyed.
 */
static int disk_ll_new(struct ll_disk *io, struct dm_transaction_manager *tm)
{
	int r;

	r = disk_ll_init(io, tm);
	if (r < 0)
		return r;

	io->nr_blocks = 0;
	io->nr_allocated = 0;
	r = dm_btree_create(&io->bitmap_info, &io->bitmap_root);
	if (r < 0)
		return r;

	r = dm_btree_create(&io->ref_count_info, &io->ref_count_root);
	if (r < 0) {
		/* unwind the first tree so we don't leak it */
		dm_btree_destroy(&io->bitmap_info, io->bitmap_root);
		return r;
	}

	return 0;
}
 +
/*
 * Grow the space map by @extra_blocks data blocks, allocating and
 * indexing any new bitmap blocks this requires.  Each fresh bitmap
 * block starts fully free (all entries zero, thanks to
 * dm_tm_new_block() zeroing).
 */
static int disk_ll_extend(struct ll_disk *io, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks;
	unsigned old_blocks, blocks;

	nr_blocks = io->nr_blocks + extra_blocks;
	/* number of bitmap blocks needed before and after the extension */
	old_blocks = dm_sector_div_up(io->nr_blocks, io->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, io->entries_per_block);

	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(io->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;
		idx.blocknr = cpu_to_le64(dm_block_location(b));

		r = dm_tm_unlock(io->tm, b);
		if (r < 0)
			return r;

		/* new block: everything free, free search can start at 0 */
		idx.nr_free = cpu_to_le32(io->entries_per_block);
		idx.none_free_before = 0;
		__dm_bless_for_disk(&idx);

		r = dm_btree_insert(&io->bitmap_info, io->bitmap_root,
				    &i, &idx, &io->bitmap_root);
		if (r < 0)
			return r;
	}

	io->nr_blocks = nr_blocks;
	return 0;
}
 +
/*
 * Open an existing low-level space map from its on-disk root @root_le
 * (length @len).  Validates the root size, then copies the persisted
 * counters and btree roots into @ll.
 */
static int disk_ll_open(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		/* NOTE(review): -ENOMEM for a short root looks odd; -EINVAL
		 * might describe the condition better -- confirm callers. */
		DMERR("sm_disk root too small");
		return -ENOMEM;
	}

	r = disk_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return 0;
}
 +
/*
 * Read the 2-bit count stored in the bitmap for block @b: look up the
 * index entry for the bitmap block covering @b, read-lock that block
 * and extract the entry.  Counts of 3 mean "see the overflow tree"
 * (handled by disk_ll_lookup()).
 */
static int disk_ll_lookup_bitmap(struct ll_disk *io, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	/* do_div() divides in place: index becomes the bitmap-block index */
	do_div(index, io->entries_per_block);
	r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	/* do_div() evaluates to the remainder: b's offset within the block */
	*result = sm_lookup_bitmap(dm_bitmap_data(blk), do_div(b, io->entries_per_block));

	return dm_tm_unlock(io->tm, blk);
}
 +
/*
 * Return the full reference count of block @b.  Counts 0..2 live
 * directly in the bitmap; the sentinel value 3 means the real count is
 * stored in the overflow ref-count btree, which we then consult.
 */
static int disk_ll_lookup(struct ll_disk *io, dm_block_t b, uint32_t *result)
{
	__le32 rc_le;
	int r = disk_ll_lookup_bitmap(io, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;	/* bitmap held the exact count */

	r = dm_btree_lookup(&io->ref_count_info, io->ref_count_root, &b, &rc_le);
	if (r < 0)
		return r;

	*result = le32_to_cpu(rc_le);

	return r;
}
 +
/*
 * Find a free block in [begin, end): walk the index entries for the
 * covering bitmap blocks, skipping any with no free entries, and scan
 * the rest with sm_find_free().  none_free_before lets the scan start
 * past a known-allocated prefix.  Returns -ENOSPC when exhausted.
 */
static int disk_ll_find_free_block(struct ll_disk *io, dm_block_t begin,
				   dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, io->entries_per_block);

	/* index_begin becomes the first bitmap-block index; begin its offset */
	begin = do_div(index_begin, io->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) <= 0)
			continue;	/* whole bitmap block is full */

		r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		/*
		 * NOTE(review): if end is an exact multiple of
		 * entries_per_block, do_div() yields remainder 0 here, which
		 * would make the last block's scan range empty -- confirm
		 * whether callers can hit that case.
		 */
		bit_end = (i == index_end - 1) ?
			do_div(end, io->entries_per_block) : io->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max((unsigned)begin, (unsigned)le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r < 0) {
			dm_tm_unlock(io->tm, blk);
			continue;	/* nothing free here despite nr_free hint */
		}

		r = dm_tm_unlock(io->tm, blk);
		if (r < 0)
			return r;

		*result = i * io->entries_per_block + (dm_block_t) position;

		return 0;
	}

	return -ENOSPC;
}
 +
/*
 * Set the reference count of block @b to @ref_count, keeping all three
 * structures consistent: the shadowed bitmap block (counts 0..2, or
 * sentinel 3), the overflow ref-count btree (actual counts > 2), and
 * the index entry's nr_free / none_free_before statistics.  Also
 * adjusts io->nr_allocated on 0 <-> non-zero transitions.
 */
static int disk_ll_insert(struct ll_disk *io, dm_block_t b, uint32_t ref_count)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	/* find the index entry for the bitmap block covering b */
	do_div(index, io->entries_per_block);
	r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
	if (r < 0)
		return r;

	/* copy-on-write the bitmap block before modifying it */
	r = dm_tm_shadow_block(io->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	bit = do_div(b, io->entries_per_block);	/* b becomes offset in block */
	old = sm_lookup_bitmap(bm_le, bit);

	if (ref_count <= 2) {
		/* small count fits in the bitmap directly */
		sm_set_bitmap(bm_le, bit, ref_count);

		if (old > 2) {
			/* count used to overflow: drop the stale tree entry */
			r = dm_btree_remove(&io->ref_count_info, io->ref_count_root,
					    &b, &io->ref_count_root);
			if (r) {
				dm_tm_unlock(io->tm, nb);
				return r;
			}
		}
	} else {
		/* large count: bitmap holds sentinel 3, tree holds the value */
		__le32 rc_le = cpu_to_le32(ref_count);

		__dm_bless_for_disk(&rc_le);

		sm_set_bitmap(bm_le, bit, 3);
		r = dm_btree_insert(&io->ref_count_info, io->ref_count_root,
				    &b, &rc_le, &io->ref_count_root);
		if (r < 0) {
			dm_tm_unlock(io->tm, nb);
			DMERR("ref count insert failed");
			return r;
		}
	}

	r = dm_tm_unlock(io->tm, nb);
	if (r < 0)
		return r;

	/* maintain allocation statistics on free <-> used transitions */
	if (ref_count && !old) {
		io->nr_allocated++;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
		if (le32_to_cpu(ie_disk.none_free_before) == b)
			ie_disk.none_free_before = cpu_to_le32(b + 1);

	} else if (old && !ref_count) {
		io->nr_allocated--;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
		ie_disk.none_free_before = cpu_to_le32(min((dm_block_t) le32_to_cpu(ie_disk.none_free_before), b));
	}

	__dm_bless_for_disk(&ie_disk);

	/* persist the updated index entry (new blocknr and stats) */
	r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, &index, &ie_disk, &io->bitmap_root);
	if (r < 0)
		return r;

	return 0;
}
 +
 +static int disk_ll_inc(struct ll_disk *ll, dm_block_t b)
 +{
 +      int r;
 +      uint32_t rc;
 +
 +      r = disk_ll_lookup(ll, b, &rc);
 +      if (r)
 +              return r;
 +
 +      return disk_ll_insert(ll, b, rc + 1);
 +}
 +
 +static int disk_ll_dec(struct ll_disk *ll, dm_block_t b)
 +{
 +      int r;
 +      uint32_t rc;
 +
 +      r = disk_ll_lookup(ll, b, &rc);
 +      if (r)
 +              return r;
 +
 +      if (!rc)
 +              return -EINVAL;
 +
 +      return disk_ll_insert(ll, b, rc - 1);
 +}
 +
 +/*--------------------------------------------------------------*/
 +
 +/*
 + * Space map interface.
 + */
struct sm_disk {
	struct dm_space_map sm;	/* vtable; must stay first for container_of */

	struct ll_disk ll;	/* the low-level on-disk representation */
};
 +
/* Free the space map object (on-disk data is owned by the tm). */
static void sm_disk_destroy(struct dm_space_map *sm)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	kfree(smd);
}

/* Grow the managed area by @extra_blocks blocks. */
static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	return disk_ll_extend(&smd->ll, extra_blocks);
}

/* Report the total number of managed blocks. */
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	*count = smd->ll.nr_blocks;

	return 0;
}

/* Report how many blocks are currently unallocated. */
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	*count = smd->ll.nr_blocks - smd->ll.nr_allocated;

	return 0;
}

/* Fetch block @b's reference count into *result. */
static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
			     uint32_t *result)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	return disk_ll_lookup(&smd->ll, b, result);
}
 +
 +static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
 +                                        int *result)
 +{
 +      int r;
 +      uint32_t count;
 +
 +      r = sm_disk_get_count(sm, b, &count);
 +      if (r)
 +              return r;
 +
 +      return count > 1;
 +}
 +
/* Set block @b's reference count to an explicit value. */
static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
			     uint32_t count)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	return disk_ll_insert(&smd->ll, b, count);
}

/* Increment block @b's reference count. */
static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	return disk_ll_inc(&smd->ll, b);
}

/* Decrement block @b's reference count. */
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	return disk_ll_dec(&smd->ll, b);
}

/* Allocate a free block: find one with count zero and take a ref. */
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	int r;
	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

	/*
	 * FIXME: We should start the search where we left off.
	 */
	r = disk_ll_find_free_block(&smd->ll, 0, smd->ll.nr_blocks, b);
	if (r)
		return r;

	return disk_ll_inc(&smd->ll, *b);
}
 +
/*
 * Nothing to flush at this layer: all updates were written through the
 * transaction manager as they happened.
 */
static int sm_disk_commit(struct dm_space_map *sm)
{
	return 0;
}

/* Size of the root blob produced by sm_disk_copy_root(). */
static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
{
	*result = sizeof(struct disk_sm_root);

	return 0;
}
 +
 +static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
 +{
 +      struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 +      struct disk_sm_root root_le;
 +
 +      root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
 +      root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
 +      root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
 +      root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
 +
 +      if (max < sizeof(root_le))
 +              return -ENOSPC;
 +
 +      memcpy(where_le, &root_le, sizeof(root_le));
 +
 +      return 0;
 +}
 +
 +/*----------------------------------------------------------------*/
 +
/* vtable copied into every sm_disk instance by the constructors below */
static struct dm_space_map ops = {
	.destroy = sm_disk_destroy,
	.extend = sm_disk_extend,
	.get_nr_blocks = sm_disk_get_nr_blocks,
	.get_nr_free = sm_disk_get_nr_free,
	.get_count = sm_disk_get_count,
	.count_is_more_than_one = sm_disk_count_is_more_than_one,
	.set_count = sm_disk_set_count,
	.inc_block = sm_disk_inc_block,
	.dec_block = sm_disk_dec_block,
	.new_block = sm_disk_new_block,
	.commit = sm_disk_commit,
	.root_size = sm_disk_root_size,
	.copy_root = sm_disk_copy_root
};
 +
/*
 * Create a fresh disk space map managing @nr_blocks blocks.  Returns
 * the new space map, or ERR_PTR on failure (allocations freed).
 */
struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
				       dm_block_t nr_blocks)
{
	int r;
	struct sm_disk *smd;

	smd = kmalloc(sizeof(*smd), GFP_KERNEL);
	if (!smd)
		return ERR_PTR(-ENOMEM);

	/* install the shared vtable */
	memcpy(&smd->sm, &ops, sizeof(smd->sm));

	r = disk_ll_new(&smd->ll, tm);
	if (r)
		goto bad;

	r = disk_ll_extend(&smd->ll, nr_blocks);
	if (r)
		goto bad;

	r = sm_disk_commit(&smd->sm);
	if (r)
		goto bad;

	return &smd->sm;

bad:
	kfree(smd);
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 +
/*
 * Open an existing disk space map from its serialised root @root_le
 * (length @len).  Returns the space map, or ERR_PTR on failure.
 */
struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
				     void *root_le, size_t len)
{
	int r;
	struct sm_disk *smd;

	smd = kmalloc(sizeof(*smd), GFP_KERNEL);
	if (!smd)
		return ERR_PTR(-ENOMEM);

	/* install the shared vtable */
	memcpy(&smd->sm, &ops, sizeof(smd->sm));

	r = disk_ll_open(&smd->ll, tm, root_le, len);
	if (r)
		goto bad;

	r = sm_disk_commit(&smd->sm);
	if (r)
		goto bad;

	return &smd->sm;

bad:
	kfree(smd);
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_sm_disk_open);
index be594dd0a12d9afe6762d615ec27babd7ebc3166,0000000000000000000000000000000000000000..e58e89ecfd6760426485f008f5d6e56f2ab1f38b
mode 100644,000000..100644
--- /dev/null
@@@ -1,414 -1,0 +1,414 @@@
- #include <linux/module.h>
 +/*
 + * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 + *
 + * This file is released under the GPL.
 + */
 +#include "dm-transaction-manager.h"
 +#include "dm-space-map.h"
 +#include "dm-space-map-disk.h"
 +#include "dm-space-map-metadata.h"
 +#include "dm-persistent-data-internal.h"
 +
++#include <linux/export.h>
 +#include <linux/slab.h>
 +#include <linux/device-mapper.h>
 +
 +#define DM_MSG_PREFIX "transaction manager"
 +
 +/*----------------------------------------------------------------*/
 +
/* Hash-table node recording that a block has already been shadowed
 * within the current transaction. */
struct shadow_info {
	struct hlist_node hlist;
	dm_block_t where;	/* location of the shadowed block */
};
 +
 +/*
 + * It would be nice if we scaled with the size of transaction.
 + */
 +#define HASH_SIZE 256
 +#define HASH_MASK (HASH_SIZE - 1)
 +
struct dm_transaction_manager {
	int is_clone;				/* non-blocking read-only clone? */
	struct dm_transaction_manager *real;	/* backing tm, clones only */

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;			/* protects buckets */
	struct hlist_head buckets[HASH_SIZE];	/* shadowed-block table */
};
 +
 +/*----------------------------------------------------------------*/
 +
/*
 * Has block @b already been shadowed in this transaction?  If so it
 * can be written in place rather than copied again.
 */
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int r = 0;
	unsigned bucket = dm_hash_block(b, HASH_MASK);
	struct shadow_info *si;
	struct hlist_node *n;

	spin_lock(&tm->lock);

	hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
		if (si->where == b) {
			r = 1;
			break;
		}

	spin_unlock(&tm->lock);

	return r;
}
 +
 +/*
 + * This can silently fail if there's no memory.  We're ok with this since
 + * creating redundant shadows causes no harm.
 + */
 +static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 +{
 +      unsigned bucket;
 +      struct shadow_info *si;
 +
 +      si = kmalloc(sizeof(*si), GFP_NOIO);
 +      if (si) {
 +              si->where = b;
 +              bucket = dm_hash_block(b, HASH_MASK);
 +
 +              spin_lock(&tm->lock);
 +              hlist_add_head(&si->hlist, tm->buckets + bucket);
 +              spin_unlock(&tm->lock);
 +      }
 +}
 +
/*
 * Empty the shadowed-block table; called at commit, since after a
 * commit every block must be shadowed afresh before modification.
 */
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
	struct shadow_info *si;
	struct hlist_node *n, *tmp;
	struct hlist_head *bucket;
	int i;

	spin_lock(&tm->lock);
	for (i = 0; i < HASH_SIZE; i++) {
		bucket = tm->buckets + i;
		/* safe variant: entries are freed while walking */
		hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
			kfree(si);

		INIT_HLIST_HEAD(bucket);
	}
	spin_unlock(&tm->lock);
}
 +
 +/*----------------------------------------------------------------*/
 +
/*
 * Construct a full (non-clone) transaction manager bound to @bm and
 * @sm.  Returns ERR_PTR(-ENOMEM) on allocation failure.  Ownership of
 * @bm and @sm stays with the caller.
 */
static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
{
	int i;
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return ERR_PTR(-ENOMEM);

	tm->is_clone = 0;
	tm->real = NULL;
	tm->bm = bm;
	tm->sm = sm;

	spin_lock_init(&tm->lock);
	for (i = 0; i < HASH_SIZE; i++)
		INIT_HLIST_HEAD(tm->buckets + i);

	return tm;
}
 +
 +struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
 +{
 +      struct dm_transaction_manager *tm;
 +
 +      tm = kmalloc(sizeof(*tm), GFP_KERNEL);
 +      if (tm) {
 +              tm->is_clone = 1;
 +              tm->real = real;
 +      }
 +
 +      return tm;
 +}
 +EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 +
/* Free a transaction manager (full or clone).  The bm/sm it points at
 * are not destroyed here; NOTE(review): shadow_info entries appear to
 * rely on a prior commit/wipe -- confirm no leak on early destroy. */
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
 +
 +int dm_tm_pre_commit(struct dm_transaction_manager *tm)
 +{
 +      int r;
 +
 +      if (tm->is_clone)
 +              return -EWOULDBLOCK;
 +
 +      r = dm_sm_commit(tm->sm);
 +      if (r < 0)
 +              return r;
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
 +
/*
 * Commit the transaction: clear the shadow table (every block must be
 * re-shadowed next transaction) and flush everything, finally writing
 * and unlocking the superblock @root.  Clones cannot commit.
 */
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);

	return dm_bm_flush_and_unlock(tm->bm, root);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);
 +
/*
 * Allocate a fresh block via the space map, return it zeroed and
 * write-locked through @result.  The space map ref is dropped again if
 * locking fails.  Clones cannot allocate.
 */
int dm_tm_new_block(struct dm_transaction_manager *tm,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	dm_block_t new_block;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_new_block(tm->sm, &new_block);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
	if (r < 0) {
		dm_sm_dec_block(tm->sm, new_block);
		return r;
	}

	/*
	 * New blocks count as shadows in that they don't need to be
	 * shadowed again.
	 */
	insert_shadow(tm, new_block);

	return 0;
}
 +
/*
 * Copy-on-write helper: allocate a new block, copy @orig's contents
 * into it, drop one reference from @orig, and return the new block
 * write-locked via *result.  *inc_children tells the caller whether
 * @orig was shared (count > 1), in which case child references must be
 * incremented.  Unwinds the lock and the new block's ref on failure.
 */
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  struct dm_block_validator *v,
			  struct dm_block **result, int *inc_children)
{
	int r;
	dm_block_t new;
	uint32_t count;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r < 0)
		goto bad_dec_block;

	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		goto bad_dec_block;

	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	r = dm_bm_unlock(orig_block);
	if (r < 0)
		goto bad_dec_block;

	/* read the count before dropping our own reference */
	r = dm_sm_get_count(tm->sm, orig, &count);
	if (r < 0)
		goto bad;

	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		goto bad;

	*inc_children = count > 1;

	return 0;

bad:
	dm_bm_unlock(*result);
bad_dec_block:
	dm_sm_dec_block(tm->sm, new);

	return r;
}
 +
/*
 * Obtain a writeable version of @orig.  If it was already shadowed
 * this transaction and is not shared, just write-lock it in place;
 * otherwise perform a copy-on-write via __shadow_block() and record
 * the new location in the shadow table.  *inc_children is set when the
 * caller must bump the reference counts of the block's children.
 */
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r, more_than_one;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	if (is_shadow(tm, orig)) {
		r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one);
		if (r < 0)
			return r;

		if (!more_than_one) {
			/* exclusive shadow: safe to modify in place */
			*inc_children = 0;
			return dm_bm_write_lock(tm->bm, orig, v, result);
		}
		/* fall through */
	}

	r = __shadow_block(tm, orig, v, result, inc_children);
	if (r < 0)
		return r;

	insert_shadow(tm, dm_block_location(*result));

	return r;
}
 +
/*
 * Read-lock block @b.  A non-blocking clone delegates to its backing
 * tm with try-lock semantics (may fail rather than wait).
 */
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **blk)
{
	if (tm->is_clone)
		return dm_bm_read_try_lock(tm->real->bm, b, v, blk);

	return dm_bm_read_lock(tm->bm, b, v, blk);
}

/* Release a lock taken through this tm. */
int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	return dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

/* Increment block @b's reference count. */
void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

/* Decrement block @b's reference count. */
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}

/* Fetch block @b's reference count; clones refuse with -EWOULDBLOCK. */
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}

/* Expose the underlying block manager. */
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}
 +
 +/*----------------------------------------------------------------*/
 +
 +static int dm_tm_create_internal(struct dm_block_manager *bm,
 +                               dm_block_t sb_location,
 +                               struct dm_block_validator *sb_validator,
 +                               size_t root_offset, size_t root_max_len,
 +                               struct dm_transaction_manager **tm,
 +                               struct dm_space_map **sm,
 +                               struct dm_block **sblock,
 +                               int create)
 +{
 +      int r;
 +
 +      *sm = dm_sm_metadata_init();
 +      if (IS_ERR(*sm))
 +              return PTR_ERR(*sm);
 +
 +      *tm = dm_tm_create(bm, *sm);
 +      if (IS_ERR(*tm)) {
 +              dm_sm_destroy(*sm);
 +              return PTR_ERR(*tm);
 +      }
 +
 +      if (create) {
 +              r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
 +                                        sb_validator, sblock);
 +              if (r < 0) {
 +                      DMERR("couldn't lock superblock");
 +                      goto bad1;
 +              }
 +
 +              r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
 +                                        sb_location);
 +              if (r) {
 +                      DMERR("couldn't create metadata space map");
 +                      goto bad2;
 +              }
 +
 +      } else {
 +              r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
 +                                   sb_validator, sblock);
 +              if (r < 0) {
 +                      DMERR("couldn't lock superblock");
 +                      goto bad1;
 +              }
 +
 +              r = dm_sm_metadata_open(*sm, *tm,
 +                                      dm_block_data(*sblock) + root_offset,
 +                                      root_max_len);
 +              if (IS_ERR(*sm)) {
 +                      DMERR("couldn't open metadata space map");
 +                      goto bad2;
 +              }
 +      }
 +
 +      return 0;
 +
 +bad2:
 +      dm_tm_unlock(*tm, *sblock);
 +bad1:
 +      dm_tm_destroy(*tm);
 +      dm_sm_destroy(*sm);
 +      return r;
 +}
 +
/* Create a tm plus freshly formatted metadata sm; superblock returned
 * write-locked via *sblock. */
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_block_validator *sb_validator,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm, struct dm_block **sblock)
{
	return dm_tm_create_internal(bm, sb_location, sb_validator,
				     0, 0, tm, sm, sblock, 1);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

/* Open a tm over an existing metadata sm stored in the superblock at
 * root_offset/root_max_len; superblock returned write-locked. */
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       struct dm_block_validator *sb_validator,
		       size_t root_offset, size_t root_max_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm, struct dm_block **sblock)
{
	return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
				     root_max_len, tm, sm, sblock, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
Simple merge
Simple merge
Simple merge
Simple merge
index f5d53a2023442fbfe1bb8c9bf77817e0fd051174,31e53b6a881aa2f7894d72f43bb3fe50e84495a6..d6b1cf66042d196b40a7b99c18241fe186840d1d
      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146.h>
+ #include <linux/module.h>
  
  LIST_HEAD(saa7146_devices);
  DEFINE_MUTEX(saa7146_devices_lock);
index a92546144eaa30dc33695104374841b73b5b2bda,e4547afcfa8809e41c9488b380c58939188ca719..71f8e018e564818225137d437ecbc369418a5519
@@@ -1,6 -1,5 +1,7 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146_vv.h>
+ #include <linux/module.h>
  
  /****************************************************************************/
  /* resource management functions, shamelessly stolen from saa7134 driver */
index 79ad73accb27426a058c03ef4481be174988eda0,c9c6e9a6c31d30e187ef38b480d0a46a226f1803..bc1f545c95cb2b669cae45a7e59bfe268e4d2844
@@@ -1,6 -1,5 +1,7 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/kernel.h>
+ #include <linux/export.h>
  #include <media/saa7146_vv.h>
  
  static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
index 384b358d30379dfcdef7a99428bea28c24f7b77f,3a00253fe1ee8bc6ab5e0e3fb89ef8cd451feef2..ce30533fd9724e1f802ecb6f67a6e6f42f998a65
@@@ -1,7 -1,6 +1,8 @@@
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <media/saa7146_vv.h>
  #include <media/v4l2-chip-ident.h>
+ #include <linux/module.h>
  
  static int max_memory = 32;
  
index 774d507b66cc7e841b412efe4c06ea4a6c8c1be5,977211fec137bdcf668ef7a022f23548e38ae11b..43be7238311ec513726e8ecaa13d0864c2b622dd
@@@ -1,5 -1,5 +1,6 @@@
  #include <linux/i2c.h>
 +#include <linux/mutex.h>
+ #include <linux/module.h>
  
  #include "dibx000_common.h"
  
Simple merge
Simple merge
index 5914390211ff2ac6f087ec5693c08234ec55bd7d,c2594948ca3fad7cbc7c4d52a327b1fa57a94904..12eedf4d515aa86dcf60107de69158895ae04795
@@@ -31,8 -31,8 +31,9 @@@
   */
  
  #include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/i2c.h>
 +#include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/version.h>
  #include <media/adp1653.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index dc58750bb71bbfc52c45fd8afe93be1d5d288be7,77032e43af4fd5f4580d1a2d48766624be00a812..5be53ae9b61cf2a6d6c318c881e43605b3dee526
@@@ -23,8 -23,8 +23,9 @@@
  
  #include <linux/slab.h>
  #include <linux/i2c.h>
 +#include <linux/interrupt.h>
  #include <linux/pm_runtime.h>
+ #include <linux/module.h>
  #include <linux/mutex.h>
  #include <linux/mfd/core.h>
  #include <linux/mfd/max8997.h>
Simple merge
Simple merge
index e06ba9440cdbea2c7143b7dcbe9f752e9d47936c,b49b91eb44ec2b0b4e042dd046dba97ec3cb3258..36de271ba0e3a9f69ed58801ff91201e0b9d422e
@@@ -19,8 -18,8 +19,9 @@@
  #include <linux/mfd/core.h>
  #include <linux/mfd/wm8400-private.h>
  #include <linux/mfd/wm8400-audio.h>
 +#include <linux/regmap.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  static struct {
        u16  readable;    /* Mask of readable bits */
Simple merge
index da69aac4cc820d1bb4069bfbbaadcd50c700f172,f57375393127e32581439ce9cc0b355c82d5ca29..65138e05d188c9027748790bc6f4479984e74e35
@@@ -7,8 -7,9 +7,10 @@@
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
++#include <linux/moduleparam.h>
  #include <linux/debugfs.h>
  #include <linux/fs.h>
+ #include <linux/export.h>
  #include <linux/seq_file.h>
  #include <linux/slab.h>
  #include <linux/stat.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 47cee772f00ad249cc0828e28384b1f1b870d6bd,93da9405b4a159df27f7997548b015723915b649..67176afcf2db3ac9d6889b593458ea9c4b459a3f
@@@ -22,8 -20,9 +22,9 @@@
  #include <linux/gpio.h>
  #include <linux/mmc/card.h>
  #include <linux/mmc/host.h>
+ #include <linux/module.h>
  
 -#include <mach/gpio.h>
 +#include <asm/gpio.h>
  #include <mach/sdhci.h>
  
  #include "sdhci-pltfm.h"
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 5dc61b4ef3cdd9bf2e7fce88359d57a4def3e0af,848dce16073cfee8f3cb258541524cc9bff905df..a85bba5e3059faf03c264cad30b00f19204d8a38
@@@ -20,6 -20,8 +20,7 @@@
  
  #include "pch_gbe.h"
  #include "pch_gbe_api.h"
 -#include <linux/prefetch.h>
+ #include <linux/module.h>
  
  #define DRV_VERSION     "1.00"
  const char pch_driver_version[] = DRV_VERSION;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index fce8c904eea9e70e2656c0b0d9d8f9ddc65d4033,b2a523ed5498022a317ed37a625b2b12f65914ea..d2bdd905a4f7e954fca3534bbf427566e2b44f0c
   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   * THE POSSIBILITY OF SUCH DAMAGES.
   */
+ #include <linux/export.h>
+ #include <linux/moduleparam.h>
  
 -#include "base.h"
 +#include <linux/module.h>
 +#include <linux/seq_file.h>
 +#include <linux/list.h>
  #include "debug.h"
 +#include "ath5k.h"
 +#include "reg.h"
 +#include "base.h"
  
  static unsigned int ath5k_debug;
  module_param_named(debug, ath5k_debug, uint, 0);
Simple merge
index 9d10322eac41abb1143ce52bc5e8d1799cd67206,0000000000000000000000000000000000000000..8552da00b6ead95987ab8b02384bd608d49ba4cf
mode 100644,000000..100644
--- /dev/null
@@@ -1,1303 -1,0 +1,1304 @@@
 +
 +/*
 + * Copyright (c) 2011 Atheros Communications Inc.
 + *
 + * Permission to use, copy, modify, and/or distribute this software for any
 + * purpose with or without fee is hereby granted, provided that the above
 + * copyright notice and this permission notice appear in all copies.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 + */
 +
++#include <linux/moduleparam.h>
 +#include <linux/mmc/sdio_func.h>
 +#include "core.h"
 +#include "cfg80211.h"
 +#include "target.h"
 +#include "debug.h"
 +#include "hif-ops.h"
 +
 +unsigned int debug_mask;
 +
 +module_param(debug_mask, uint, 0644);
 +
 +/*
 + * Include definitions here that can be used to tune the WLAN module
 + * behavior. Different customers can tune the behavior as per their needs,
 + * here.
 + */
 +
 +/*
 + * This configuration item enable/disable keepalive support.
 + * Keepalive support: In the absence of any data traffic to AP, null
 + * frames will be sent to the AP at periodic interval, to keep the association
 + * active. This configuration item defines the periodic interval.
 + * Use value of zero to disable keepalive support
 + * Default: 60 seconds
 + */
 +#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
 +
 +/*
 + * This configuration item sets the value of the disconnect timeout.
 + * The firmware delays sending the disconnect event to the host for
 + * this timeout after it gets disconnected from the current AP.
 + * If the firmware successfully roams within the disconnect timeout
 + * it sends a new connect event.
 + */
 +#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
 +
 +#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
 +
 +enum addr_type {
 +      DATASET_PATCH_ADDR,
 +      APP_LOAD_ADDR,
 +      APP_START_OVERRIDE_ADDR,
 +};
 +
 +#define ATH6KL_DATA_OFFSET    64
 +struct sk_buff *ath6kl_buf_alloc(int size)
 +{
 +      struct sk_buff *skb;
 +      u16 reserved;
 +
 +      /* Add chacheline space at front and back of buffer */
 +      reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
 +                 sizeof(struct htc_packet);
 +      skb = dev_alloc_skb(size + reserved);
 +
 +      if (skb)
 +              skb_reserve(skb, reserved - L1_CACHE_BYTES);
 +      return skb;
 +}
 +
 +void ath6kl_init_profile_info(struct ath6kl *ar)
 +{
 +      ar->ssid_len = 0;
 +      memset(ar->ssid, 0, sizeof(ar->ssid));
 +
 +      ar->dot11_auth_mode = OPEN_AUTH;
 +      ar->auth_mode = NONE_AUTH;
 +      ar->prwise_crypto = NONE_CRYPT;
 +      ar->prwise_crypto_len = 0;
 +      ar->grp_crypto = NONE_CRYPT;
 +      ar->grp_crpto_len = 0;
 +      memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
 +      memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
 +      memset(ar->bssid, 0, sizeof(ar->bssid));
 +      ar->bss_ch = 0;
 +      ar->nw_type = ar->next_mode = INFRA_NETWORK;
 +}
 +
 +static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
 +{
 +      switch (ar->nw_type) {
 +      case INFRA_NETWORK:
 +              return HI_OPTION_FW_MODE_BSS_STA;
 +      case ADHOC_NETWORK:
 +              return HI_OPTION_FW_MODE_IBSS;
 +      case AP_NETWORK:
 +              return HI_OPTION_FW_MODE_AP;
 +      default:
 +              ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
 +              return 0xff;
 +      }
 +}
 +
 +static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
 +                                        u32 item_offset)
 +{
 +      u32 addr = 0;
 +
 +      if (ar->target_type == TARGET_TYPE_AR6003)
 +              addr = ATH6KL_HI_START_ADDR + item_offset;
 +
 +      return addr;
 +}
 +
 +static int ath6kl_set_host_app_area(struct ath6kl *ar)
 +{
 +      u32 address, data;
 +      struct host_app_area host_app_area;
 +
 +      /* Fetch the address of the host_app_area_s
 +       * instance in the host interest area */
 +      address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
 +      address = TARG_VTOP(address);
 +
 +      if (ath6kl_read_reg_diag(ar, &address, &data))
 +              return -EIO;
 +
 +      address = TARG_VTOP(data);
 +      host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
 +      if (ath6kl_access_datadiag(ar, address,
 +                              (u8 *)&host_app_area,
 +                              sizeof(struct host_app_area), false))
 +              return -EIO;
 +
 +      return 0;
 +}
 +
 +static inline void set_ac2_ep_map(struct ath6kl *ar,
 +                                u8 ac,
 +                                enum htc_endpoint_id ep)
 +{
 +      ar->ac2ep_map[ac] = ep;
 +      ar->ep2ac_map[ep] = ac;
 +}
 +
 +/* connect to a service */
 +static int ath6kl_connectservice(struct ath6kl *ar,
 +                               struct htc_service_connect_req  *con_req,
 +                               char *desc)
 +{
 +      int status;
 +      struct htc_service_connect_resp response;
 +
 +      memset(&response, 0, sizeof(response));
 +
 +      status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response);
 +      if (status) {
 +              ath6kl_err("failed to connect to %s service status:%d\n",
 +                         desc, status);
 +              return status;
 +      }
 +
 +      switch (con_req->svc_id) {
 +      case WMI_CONTROL_SVC:
 +              if (test_bit(WMI_ENABLED, &ar->flag))
 +                      ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
 +              ar->ctrl_ep = response.endpoint;
 +              break;
 +      case WMI_DATA_BE_SVC:
 +              set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
 +              break;
 +      case WMI_DATA_BK_SVC:
 +              set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
 +              break;
 +      case WMI_DATA_VI_SVC:
 +              set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
 +              break;
 +      case WMI_DATA_VO_SVC:
 +              set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
 +              break;
 +      default:
 +              ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static int ath6kl_init_service_ep(struct ath6kl *ar)
 +{
 +      struct htc_service_connect_req connect;
 +
 +      memset(&connect, 0, sizeof(connect));
 +
 +      /* these fields are the same for all service endpoints */
 +      connect.ep_cb.rx = ath6kl_rx;
 +      connect.ep_cb.rx_refill = ath6kl_rx_refill;
 +      connect.ep_cb.tx_full = ath6kl_tx_queue_full;
 +
 +      /*
 +       * Set the max queue depth so that our ath6kl_tx_queue_full handler
 +       * gets called.
 +      */
 +      connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
 +      connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
 +      if (!connect.ep_cb.rx_refill_thresh)
 +              connect.ep_cb.rx_refill_thresh++;
 +
 +      /* connect to control service */
 +      connect.svc_id = WMI_CONTROL_SVC;
 +      if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
 +              return -EIO;
 +
 +      connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
 +
 +      /*
 +       * Limit the HTC message size on the send path, although e can
 +       * receive A-MSDU frames of 4K, we will only send ethernet-sized
 +       * (802.3) frames on the send path.
 +       */
 +      connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
 +
 +      /*
 +       * To reduce the amount of committed memory for larger A_MSDU
 +       * frames, use the recv-alloc threshold mechanism for larger
 +       * packets.
 +       */
 +      connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
 +      connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
 +
 +      /*
 +       * For the remaining data services set the connection flag to
 +       * reduce dribbling, if configured to do so.
 +       */
 +      connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
 +      connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
 +      connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
 +
 +      connect.svc_id = WMI_DATA_BE_SVC;
 +
 +      if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
 +              return -EIO;
 +
 +      /* connect to back-ground map this to WMI LOW_PRI */
 +      connect.svc_id = WMI_DATA_BK_SVC;
 +      if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
 +              return -EIO;
 +
 +      /* connect to Video service, map this to to HI PRI */
 +      connect.svc_id = WMI_DATA_VI_SVC;
 +      if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
 +              return -EIO;
 +
 +      /*
 +       * Connect to VO service, this is currently not mapped to a WMI
 +       * priority stream due to historical reasons. WMI originally
 +       * defined 3 priorities over 3 mailboxes We can change this when
 +       * WMI is reworked so that priorities are not dependent on
 +       * mailboxes.
 +       */
 +      connect.svc_id = WMI_DATA_VO_SVC;
 +      if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
 +              return -EIO;
 +
 +      return 0;
 +}
 +
 +static void ath6kl_init_control_info(struct ath6kl *ar)
 +{
 +      u8 ctr;
 +
 +      clear_bit(WMI_ENABLED, &ar->flag);
 +      ath6kl_init_profile_info(ar);
 +      ar->def_txkey_index = 0;
 +      memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
 +      ar->ch_hint = 0;
 +      ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
 +      ar->listen_intvl_b = 0;
 +      ar->tx_pwr = 0;
 +      clear_bit(SKIP_SCAN, &ar->flag);
 +      set_bit(WMM_ENABLED, &ar->flag);
 +      ar->intra_bss = 1;
 +      memset(&ar->sc_params, 0, sizeof(ar->sc_params));
 +      ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
 +      ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
 +
 +      memset((u8 *)ar->sta_list, 0,
 +             AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
 +
 +      spin_lock_init(&ar->mcastpsq_lock);
 +
 +      /* Init the PS queues */
 +      for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
 +              spin_lock_init(&ar->sta_list[ctr].psq_lock);
 +              skb_queue_head_init(&ar->sta_list[ctr].psq);
 +      }
 +
 +      skb_queue_head_init(&ar->mcastpsq);
 +
 +      memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
 +}
 +
 +/*
 + * Set HTC/Mbox operational parameters, this can only be called when the
 + * target is in the BMI phase.
 + */
 +static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
 +                               u8 htc_ctrl_buf)
 +{
 +      int status;
 +      u32 blk_size;
 +
 +      blk_size = ar->mbox_info.block_size;
 +
 +      if (htc_ctrl_buf)
 +              blk_size |=  ((u32)htc_ctrl_buf) << 16;
 +
 +      /* set the host interest area for the block size */
 +      status = ath6kl_bmi_write(ar,
 +                      ath6kl_get_hi_item_addr(ar,
 +                      HI_ITEM(hi_mbox_io_block_sz)),
 +                      (u8 *)&blk_size,
 +                      4);
 +      if (status) {
 +              ath6kl_err("bmi_write_memory for IO block size failed\n");
 +              goto out;
 +      }
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
 +                 blk_size,
 +                 ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
 +
 +      if (mbox_isr_yield_val) {
 +              /* set the host interest area for the mbox ISR yield limit */
 +              status = ath6kl_bmi_write(ar,
 +                              ath6kl_get_hi_item_addr(ar,
 +                              HI_ITEM(hi_mbox_isr_yield_limit)),
 +                              (u8 *)&mbox_isr_yield_val,
 +                              4);
 +              if (status) {
 +                      ath6kl_err("bmi_write_memory for yield limit failed\n");
 +                      goto out;
 +              }
 +      }
 +
 +out:
 +      return status;
 +}
 +
 +#define REG_DUMP_COUNT_AR6003   60
 +#define REGISTER_DUMP_LEN_MAX   60
 +
 +static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
 +{
 +      u32 address;
 +      u32 regdump_loc = 0;
 +      int status;
 +      u32 regdump_val[REGISTER_DUMP_LEN_MAX];
 +      u32 i;
 +
 +      if (ar->target_type != TARGET_TYPE_AR6003)
 +              return;
 +
 +      /* the reg dump pointer is copied to the host interest area */
 +      address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
 +      address = TARG_VTOP(address);
 +
 +      /* read RAM location through diagnostic window */
 +      status = ath6kl_read_reg_diag(ar, &address, &regdump_loc);
 +
 +      if (status || !regdump_loc) {
 +              ath6kl_err("failed to get ptr to register dump area\n");
 +              return;
 +      }
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
 +              regdump_loc);
 +
 +      regdump_loc = TARG_VTOP(regdump_loc);
 +
 +      /* fetch register dump data */
 +      status = ath6kl_access_datadiag(ar,
 +                                      regdump_loc,
 +                                      (u8 *)&regdump_val[0],
 +                                      REG_DUMP_COUNT_AR6003 * (sizeof(u32)),
 +                                      true);
 +
 +      if (status) {
 +              ath6kl_err("failed to get register dump\n");
 +              return;
 +      }
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
 +
 +      for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
 +              ath6kl_dbg(ATH6KL_DBG_TRC, " %d :  0x%8.8X\n",
 +                         i, regdump_val[i]);
 +
 +}
 +
/* Called when the target asserts; log it and dump its assert info. */
void ath6kl_target_failure(struct ath6kl *ar)
{
	ath6kl_err("target asserted\n");

	/* try dumping target assertion information (if any) */
	ath6kl_dump_target_assert_info(ar);
}
 +
 +static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
 +{
 +      int status = 0;
 +
 +      /*
 +       * Configure the device for rx dot11 header rules. "0,0" are the
 +       * default values. Required if checksum offload is needed. Set
 +       * RxMetaVersion to 2.
 +       */
 +      if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
 +                                             ar->rx_meta_ver, 0, 0)) {
 +              ath6kl_err("unable to set the rx frame format\n");
 +              status = -EIO;
 +      }
 +
 +      if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
 +              if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
 +                   IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
 +                      ath6kl_err("unable to set power save fail event policy\n");
 +                      status = -EIO;
 +              }
 +
 +      if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
 +              if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
 +                   WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
 +                      ath6kl_err("unable to set barker preamble policy\n");
 +                      status = -EIO;
 +              }
 +
 +      if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
 +                      WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
 +              ath6kl_err("unable to set keep alive interval\n");
 +              status = -EIO;
 +      }
 +
 +      if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
 +                      WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
 +              ath6kl_err("unable to set disconnect timeout\n");
 +              status = -EIO;
 +      }
 +
 +      if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
 +              if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
 +                      ath6kl_err("unable to set txop bursting\n");
 +                      status = -EIO;
 +              }
 +
 +      return status;
 +}
 +
 +int ath6kl_configure_target(struct ath6kl *ar)
 +{
 +      u32 param, ram_reserved_size;
 +      u8 fw_iftype;
 +
 +      fw_iftype = ath6kl_get_fw_iftype(ar);
 +      if (fw_iftype == 0xff)
 +              return -EINVAL;
 +
 +      /* Tell target which HTC version it is used*/
 +      param = HTC_PROTOCOL_VERSION;
 +      if (ath6kl_bmi_write(ar,
 +                           ath6kl_get_hi_item_addr(ar,
 +                           HI_ITEM(hi_app_host_interest)),
 +                           (u8 *)&param, 4) != 0) {
 +              ath6kl_err("bmi_write_memory for htc version failed\n");
 +              return -EIO;
 +      }
 +
 +      /* set the firmware mode to STA/IBSS/AP */
 +      param = 0;
 +
 +      if (ath6kl_bmi_read(ar,
 +                          ath6kl_get_hi_item_addr(ar,
 +                          HI_ITEM(hi_option_flag)),
 +                          (u8 *)&param, 4) != 0) {
 +              ath6kl_err("bmi_read_memory for setting fwmode failed\n");
 +              return -EIO;
 +      }
 +
 +      param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
 +      param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
 +      param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
 +      param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
 +
 +      if (ath6kl_bmi_write(ar,
 +                           ath6kl_get_hi_item_addr(ar,
 +                           HI_ITEM(hi_option_flag)),
 +                           (u8 *)&param,
 +                           4) != 0) {
 +              ath6kl_err("bmi_write_memory for setting fwmode failed\n");
 +              return -EIO;
 +      }
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");
 +
 +      /*
 +       * Hardcode the address use for the extended board data
 +       * Ideally this should be pre-allocate by the OS at boot time
 +       * But since it is a new feature and board data is loaded
 +       * at init time, we have to workaround this from host.
 +       * It is difficult to patch the firmware boot code,
 +       * but possible in theory.
 +       */
 +
 +      if (ar->target_type == TARGET_TYPE_AR6003) {
 +              if (ar->version.target_ver == AR6003_REV2_VERSION) {
 +                      param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
 +                      ram_reserved_size =  AR6003_REV2_RAM_RESERVE_SIZE;
 +              } else {
 +                      param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
 +                      ram_reserved_size =  AR6003_REV3_RAM_RESERVE_SIZE;
 +              }
 +
 +              if (ath6kl_bmi_write(ar,
 +                                   ath6kl_get_hi_item_addr(ar,
 +                                   HI_ITEM(hi_board_ext_data)),
 +                                   (u8 *)&param, 4) != 0) {
 +                      ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
 +                      return -EIO;
 +              }
 +              if (ath6kl_bmi_write(ar,
 +                                   ath6kl_get_hi_item_addr(ar,
 +                                   HI_ITEM(hi_end_ram_reserve_sz)),
 +                                   (u8 *)&ram_reserved_size, 4) != 0) {
 +                      ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
 +                      return -EIO;
 +              }
 +      }
 +
 +      /* set the block size for the target */
 +      if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
 +              /* use default number of control buffers */
 +              return -EIO;
 +
 +      return 0;
 +}
 +
 +struct ath6kl *ath6kl_core_alloc(struct device *sdev)
 +{
 +      struct net_device *dev;
 +      struct ath6kl *ar;
 +      struct wireless_dev *wdev;
 +
 +      wdev = ath6kl_cfg80211_init(sdev);
 +      if (!wdev) {
 +              ath6kl_err("ath6kl_cfg80211_init failed\n");
 +              return NULL;
 +      }
 +
 +      ar = wdev_priv(wdev);
 +      ar->dev = sdev;
 +      ar->wdev = wdev;
 +      wdev->iftype = NL80211_IFTYPE_STATION;
 +
 +      dev = alloc_netdev(0, "wlan%d", ether_setup);
 +      if (!dev) {
 +              ath6kl_err("no memory for network device instance\n");
 +              ath6kl_cfg80211_deinit(ar);
 +              return NULL;
 +      }
 +
 +      dev->ieee80211_ptr = wdev;
 +      SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
 +      wdev->netdev = dev;
 +      ar->sme_state = SME_DISCONNECTED;
 +      ar->auto_auth_stage = AUTH_IDLE;
 +
 +      init_netdev(dev);
 +
 +      ar->net_dev = dev;
 +      set_bit(WLAN_ENABLED, &ar->flag);
 +
 +      ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
 +
 +      spin_lock_init(&ar->lock);
 +
 +      ath6kl_init_control_info(ar);
 +      init_waitqueue_head(&ar->event_wq);
 +      sema_init(&ar->sem, 1);
 +      clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
 +
 +      INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
 +
 +      setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
 +                  (unsigned long) dev);
 +
 +      return ar;
 +}
 +
 +int ath6kl_unavail_ev(struct ath6kl *ar)
 +{
 +      ath6kl_destroy(ar->net_dev, 1);
 +
 +      return 0;
 +}
 +
 +/* firmware upload */
 +static u32 ath6kl_get_load_address(u32 target_ver, enum addr_type type)
 +{
 +      WARN_ON(target_ver != AR6003_REV2_VERSION &&
 +              target_ver != AR6003_REV3_VERSION);
 +
 +      switch (type) {
 +      case DATASET_PATCH_ADDR:
 +              return (target_ver == AR6003_REV2_VERSION) ?
 +                      AR6003_REV2_DATASET_PATCH_ADDRESS :
 +                      AR6003_REV3_DATASET_PATCH_ADDRESS;
 +      case APP_LOAD_ADDR:
 +              return (target_ver == AR6003_REV2_VERSION) ?
 +                      AR6003_REV2_APP_LOAD_ADDRESS :
 +                      0x1234;
 +      case APP_START_OVERRIDE_ADDR:
 +              return (target_ver == AR6003_REV2_VERSION) ?
 +                      AR6003_REV2_APP_START_OVERRIDE :
 +                      AR6003_REV3_APP_START_OVERRIDE;
 +      default:
 +              return 0;
 +      }
 +}
 +
 +static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
 +                       u8 **fw, size_t *fw_len)
 +{
 +      const struct firmware *fw_entry;
 +      int ret;
 +
 +      ret = request_firmware(&fw_entry, filename, ar->dev);
 +      if (ret)
 +              return ret;
 +
 +      *fw_len = fw_entry->size;
 +      *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
 +
 +      if (*fw == NULL)
 +              ret = -ENOMEM;
 +
 +      release_firmware(fw_entry);
 +
 +      return ret;
 +}
 +
 +static int ath6kl_fetch_board_file(struct ath6kl *ar)
 +{
 +      const char *filename;
 +      int ret;
 +
 +      switch (ar->version.target_ver) {
 +      case AR6003_REV2_VERSION:
 +              filename = AR6003_REV2_BOARD_DATA_FILE;
 +              break;
 +      default:
 +              filename = AR6003_REV3_BOARD_DATA_FILE;
 +              break;
 +      }
 +
 +      ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
 +                          &ar->fw_board_len);
 +      if (ret == 0) {
 +              /* managed to get proper board file */
 +              return 0;
 +      }
 +
 +      /* there was no proper board file, try to use default instead */
 +      ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
 +                  filename, ret);
 +
 +      switch (ar->version.target_ver) {
 +      case AR6003_REV2_VERSION:
 +              filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
 +              break;
 +      default:
 +              filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
 +              break;
 +      }
 +
 +      ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
 +                          &ar->fw_board_len);
 +      if (ret) {
 +              ath6kl_err("Failed to get default board file %s: %d\n",
 +                         filename, ret);
 +              return ret;
 +      }
 +
 +      ath6kl_warn("WARNING! No proper board file was not found, instead using a default board file.\n");
 +      ath6kl_warn("Most likely your hardware won't work as specified. Install correct board file!\n");
 +
 +      return 0;
 +}
 +
 +
 +static int ath6kl_upload_board_file(struct ath6kl *ar)
 +{
 +      u32 board_address, board_ext_address, param;
 +      int ret;
 +
 +      if (ar->fw_board == NULL) {
 +              ret = ath6kl_fetch_board_file(ar);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      /* Determine where in Target RAM to write Board Data */
 +      ath6kl_bmi_read(ar,
 +                      ath6kl_get_hi_item_addr(ar,
 +                      HI_ITEM(hi_board_data)),
 +                      (u8 *) &board_address, 4);
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "board data download addr: 0x%x\n",
 +                 board_address);
 +
 +      /* determine where in target ram to write extended board data */
 +      ath6kl_bmi_read(ar,
 +                      ath6kl_get_hi_item_addr(ar,
 +                      HI_ITEM(hi_board_ext_data)),
 +                      (u8 *) &board_ext_address, 4);
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "board file download addr: 0x%x\n",
 +                 board_ext_address);
 +
 +      if (board_ext_address == 0) {
 +              ath6kl_err("Failed to get board file target address.\n");
 +              return -EINVAL;
 +      }
 +
 +      if (ar->fw_board_len == (AR6003_BOARD_DATA_SZ +
 +                               AR6003_BOARD_EXT_DATA_SZ)) {
 +              /* write extended board data */
 +              ret = ath6kl_bmi_write(ar, board_ext_address,
 +                                     ar->fw_board + AR6003_BOARD_DATA_SZ,
 +                                     AR6003_BOARD_EXT_DATA_SZ);
 +
 +              if (ret) {
 +                      ath6kl_err("Failed to write extended board data: %d\n",
 +                                 ret);
 +                      return ret;
 +              }
 +
 +              /* record that extended board data is initialized */
 +              param = (AR6003_BOARD_EXT_DATA_SZ << 16) | 1;
 +              ath6kl_bmi_write(ar,
 +                               ath6kl_get_hi_item_addr(ar,
 +                               HI_ITEM(hi_board_ext_data_config)),
 +                               (unsigned char *) &param, 4);
 +      }
 +
 +      if (ar->fw_board_len < AR6003_BOARD_DATA_SZ) {
 +              ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
 +              ret = -EINVAL;
 +              return ret;
 +      }
 +
 +      ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
 +                             AR6003_BOARD_DATA_SZ);
 +
 +      if (ret) {
 +              ath6kl_err("Board file bmi write failed: %d\n", ret);
 +              return ret;
 +      }
 +
 +      /* record the fact that Board Data IS initialized */
 +      param = 1;
 +      ath6kl_bmi_write(ar,
 +                       ath6kl_get_hi_item_addr(ar,
 +                       HI_ITEM(hi_board_data_initialized)),
 +                       (u8 *)&param, 4);
 +
 +      return ret;
 +}
 +
 +/*
 + * Download and run the OTP (One-Time-Programmable) image matching the
 + * detected AR6003 target revision. The image is fetched once and cached
 + * in ar->fw_otp for subsequent re-initialisations. Returns 0 or a
 + * negative error code.
 + */
 +static int ath6kl_upload_otp(struct ath6kl *ar)
 +{
 +      const char *filename;
 +      u32 address, param;
 +      int ret;
 +
 +      switch (ar->version.target_ver) {
 +      case AR6003_REV2_VERSION:
 +              filename = AR6003_REV2_OTP_FILE;
 +              break;
 +      default:
 +              filename = AR6003_REV3_OTP_FILE;
 +              break;
 +      }
 +
 +      /* fetch the OTP image only once; it is cached in ar->fw_otp */
 +      if (ar->fw_otp == NULL) {
 +              ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
 +                                  &ar->fw_otp_len);
 +              if (ret) {
 +                      ath6kl_err("Failed to get OTP file %s: %d\n",
 +                                 filename, ret);
 +                      return ret;
 +              }
 +      }
 +
 +      address = ath6kl_get_load_address(ar->version.target_ver,
 +                                        APP_LOAD_ADDR);
 +
 +      ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
 +                                     ar->fw_otp_len);
 +      if (ret) {
 +              ath6kl_err("Failed to upload OTP file: %d\n", ret);
 +              return ret;
 +      }
 +
 +      /* execute the OTP code */
 +      param = 0;
 +      address = ath6kl_get_load_address(ar->version.target_ver,
 +                                        APP_START_OVERRIDE_ADDR);
 +      /*
 +       * NOTE(review): ath6kl_bmi_execute()'s return value is discarded;
 +       * 'ret' returned below is still 0 from the fast download above.
 +       * Confirm the execute result is intentionally best-effort.
 +       */
 +      ath6kl_bmi_execute(ar, address, &param);
 +
 +      return ret;
 +}
 +
 +/*
 + * Download the main target firmware image for the detected AR6003
 + * revision and set its start address. The image is fetched once and
 + * cached in ar->fw. Returns 0 or a negative error code.
 + */
 +static int ath6kl_upload_firmware(struct ath6kl *ar)
 +{
 +      const char *filename;
 +      u32 address;
 +      int ret;
 +
 +      switch (ar->version.target_ver) {
 +      case AR6003_REV2_VERSION:
 +              filename = AR6003_REV2_FIRMWARE_FILE;
 +              break;
 +      default:
 +              filename = AR6003_REV3_FIRMWARE_FILE;
 +              break;
 +      }
 +
 +      /* fetch the firmware only once; it is cached in ar->fw */
 +      if (ar->fw == NULL) {
 +              ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
 +              if (ret) {
 +                      ath6kl_err("Failed to get firmware file %s: %d\n",
 +                                 filename, ret);
 +                      return ret;
 +              }
 +      }
 +
 +      address = ath6kl_get_load_address(ar->version.target_ver,
 +                                        APP_LOAD_ADDR);
 +
 +      ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
 +
 +      if (ret) {
 +              ath6kl_err("Failed to write firmware: %d\n", ret);
 +              return ret;
 +      }
 +
 +      /* Set starting address for firmware */
 +      address = ath6kl_get_load_address(ar->version.target_ver,
 +                                        APP_START_OVERRIDE_ADDR);
 +      /*
 +       * NOTE(review): ath6kl_bmi_set_app_start()'s return value is
 +       * discarded; 'ret' below is 0 from the download. Confirm intended.
 +       */
 +      ath6kl_bmi_set_app_start(ar, address);
 +
 +      return ret;
 +}
 +
 +/*
 + * Download the dataset patch image for the detected AR6003 revision and
 + * publish its address to the target via the hi_dset_list_head host
 + * interest item. The image is fetched once and cached in ar->fw_patch.
 + * Returns 0 or a negative error code.
 + */
 +static int ath6kl_upload_patch(struct ath6kl *ar)
 +{
 +      const char *filename;
 +      u32 address, param;
 +      int ret;
 +
 +      switch (ar->version.target_ver) {
 +      case AR6003_REV2_VERSION:
 +              filename = AR6003_REV2_PATCH_FILE;
 +              break;
 +      default:
 +              filename = AR6003_REV3_PATCH_FILE;
 +              break;
 +      }
 +
 +      /* fetch the patch only once; it is cached in ar->fw_patch */
 +      if (ar->fw_patch == NULL) {
 +              ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
 +                                  &ar->fw_patch_len);
 +              if (ret) {
 +                      ath6kl_err("Failed to get patch file %s: %d\n",
 +                                 filename, ret);
 +                      return ret;
 +              }
 +      }
 +
 +      address = ath6kl_get_load_address(ar->version.target_ver,
 +                                        DATASET_PATCH_ADDR);
 +
 +      ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
 +      if (ret) {
 +              ath6kl_err("Failed to write patch file: %d\n", ret);
 +              return ret;
 +      }
 +
 +      /* tell the target where the patch dataset list starts */
 +      param = address;
 +      /*
 +       * NOTE(review): this ath6kl_bmi_write()'s return value is ignored
 +       * and 0 is returned unconditionally - confirm intended.
 +       */
 +      ath6kl_bmi_write(ar,
 +                       ath6kl_get_hi_item_addr(ar,
 +                       HI_ITEM(hi_dset_list_head)),
 +                       (unsigned char *) &param, 4);
 +
 +      return 0;
 +}
 +
 +/*
 + * One-time chip bring-up over BMI for AR6003 targets: temporarily
 + * disable system sleep, program the PLL and CPU/LPO clocks, apply a
 + * rev2 GPIO workaround, upload board data / OTP / firmware / patch,
 + * then restore the sleep state and configure the debug UART TX pin.
 + * Returns 0 on success or the first failing register/BMI status.
 + */
 +static int ath6kl_init_upload(struct ath6kl *ar)
 +{
 +      u32 param, options, sleep, address;
 +      int status = 0;
 +
 +      if (ar->target_type != TARGET_TYPE_AR6003)
 +              return -EINVAL;
 +
 +      /* temporarily disable system sleep */
 +      address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
 +      status = ath6kl_bmi_reg_read(ar, address, &param);
 +      if (status)
 +              return status;
 +
 +      /* remember original scratch register value for restore below */
 +      options = param;
 +
 +      param |= ATH6KL_OPTION_SLEEP_DISABLE;
 +      status = ath6kl_bmi_reg_write(ar, address, param);
 +      if (status)
 +              return status;
 +
 +      address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
 +      status = ath6kl_bmi_reg_read(ar, address, &param);
 +      if (status)
 +              return status;
 +
 +      /* remember original sleep register value for restore below */
 +      sleep = param;
 +
 +      param |= SM(SYSTEM_SLEEP_DISABLE, 1);
 +      status = ath6kl_bmi_reg_write(ar, address, param);
 +      if (status)
 +              return status;
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
 +                 options, sleep);
 +
 +      /* program analog PLL register */
 +      status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
 +                                    0xF9104001);
 +      if (status)
 +              return status;
 +
 +      /* Run at 80/88MHz by default */
 +      param = SM(CPU_CLOCK_STANDARD, 1);
 +
 +      address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
 +      status = ath6kl_bmi_reg_write(ar, address, param);
 +      if (status)
 +              return status;
 +
 +      param = 0;
 +      address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
 +      param = SM(LPO_CAL_ENABLE, 1);
 +      status = ath6kl_bmi_reg_write(ar, address, param);
 +      if (status)
 +              return status;
 +
 +      /* WAR to avoid SDIO CRC err */
 +      if (ar->version.target_ver == AR6003_REV2_VERSION) {
 +              ath6kl_err("temporary war to avoid sdio crc error\n");
 +
 +              /* 0x20 written to GPIO pins 10-13 below */
 +              param = 0x20;
 +
 +              address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
 +              status = ath6kl_bmi_reg_write(ar, address, param);
 +              if (status)
 +                      return status;
 +
 +              address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
 +              status = ath6kl_bmi_reg_write(ar, address, param);
 +              if (status)
 +                      return status;
 +
 +              address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
 +              status = ath6kl_bmi_reg_write(ar, address, param);
 +              if (status)
 +                      return status;
 +
 +              address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
 +              status = ath6kl_bmi_reg_write(ar, address, param);
 +              if (status)
 +                      return status;
 +      }
 +
 +      /* write EEPROM data to Target RAM */
 +      status = ath6kl_upload_board_file(ar);
 +      if (status)
 +              return status;
 +
 +      /* transfer One time Programmable data */
 +      status = ath6kl_upload_otp(ar);
 +      if (status)
 +              return status;
 +
 +      /* Download Target firmware */
 +      status = ath6kl_upload_firmware(ar);
 +      if (status)
 +              return status;
 +
 +      status = ath6kl_upload_patch(ar);
 +      if (status)
 +              return status;
 +
 +      /* Restore system sleep */
 +      address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
 +      status = ath6kl_bmi_reg_write(ar, address, sleep);
 +      if (status)
 +              return status;
 +
 +      address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
 +      /*
 +       * NOTE(review): the scratch register is restored as options | 0x20
 +       * rather than the saved 'options' verbatim - confirm the extra
 +       * 0x20 bit is intentional.
 +       */
 +      param = options | 0x20;
 +      status = ath6kl_bmi_reg_write(ar, address, param);
 +      if (status)
 +              return status;
 +
 +      /* Configure GPIO AR6003 UART */
 +      param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
 +      status = ath6kl_bmi_write(ar,
 +                                ath6kl_get_hi_item_addr(ar,
 +                                HI_ITEM(hi_dbg_uart_txpin)),
 +                                (u8 *)&param, 4);
 +
 +      return status;
 +}
 +
 +/*
 + * Late initialisation after firmware upload: finish BMI, bring up WMI
 + * and the HTC layer, prime RX buffers, start HTC and wait for the
 + * target's WMI_READY event, then push the initial WLAN configuration.
 + * On failure the already-initialised layers are unwound via the error
 + * labels in reverse order. Returns 0 or a negative error code.
 + */
 +static int ath6kl_init(struct net_device *dev)
 +{
 +      struct ath6kl *ar = ath6kl_priv(dev);
 +      int status = 0;
 +      s32 timeleft;
 +
 +      if (!ar)
 +              return -EIO;
 +
 +      /* Do we need to finish the BMI phase */
 +      if (ath6kl_bmi_done(ar)) {
 +              status = -EIO;
 +              goto ath6kl_init_done;
 +      }
 +
 +      /* Indicate that WMI is enabled (although not ready yet) */
 +      set_bit(WMI_ENABLED, &ar->flag);
 +      ar->wmi = ath6kl_wmi_init(ar);
 +      if (!ar->wmi) {
 +              ath6kl_err("failed to initialize wmi\n");
 +              status = -EIO;
 +              goto ath6kl_init_done;
 +      }
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
 +
 +      wlan_node_table_init(&ar->scan_table);
 +
 +      /*
 +       * The reason we have to wait for the target here is that the
 +       * driver layer has to init BMI in order to set the host block
 +       * size.
 +       */
 +      if (ath6kl_htc_wait_target(ar->htc_target)) {
 +              status = -EIO;
 +              goto err_node_cleanup;
 +      }
 +
 +      if (ath6kl_init_service_ep(ar)) {
 +              status = -EIO;
 +              goto err_cleanup_scatter;
 +      }
 +
 +      /* setup access class priority mappings */
 +      ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest  */
 +      ar->ac_stream_pri_map[WMM_AC_BE] = 1;
 +      ar->ac_stream_pri_map[WMM_AC_VI] = 2;
 +      ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
 +
 +      /* give our connected endpoints some buffers */
 +      ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
 +      ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
 +
 +      /* allocate some buffers that handle larger AMSDU frames */
 +      ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
 +
 +      /* setup credit distribution */
 +      ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
 +
 +      ath6kl_cookie_init(ar);
 +
 +      /* start HTC */
 +      status = ath6kl_htc_start(ar->htc_target);
 +
 +      if (status) {
 +              ath6kl_cookie_cleanup(ar);
 +              goto err_rxbuf_cleanup;
 +      }
 +
 +      /* Wait for Wmi event to be ready */
 +      timeleft = wait_event_interruptible_timeout(ar->event_wq,
 +                                                  test_bit(WMI_READY,
 +                                                           &ar->flag),
 +                                                  WMI_TIMEOUT);
 +
 +      /*
 +       * NOTE(review): the ABI version is checked before the
 +       * timeout/signal result of the wait above, so a timed-out wait
 +       * can be reported as an ABI mismatch first - confirm intended.
 +       */
 +      if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
 +              ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
 +                         ATH6KL_ABI_VERSION, ar->version.abi_ver);
 +              status = -EIO;
 +              goto err_htc_stop;
 +      }
 +
 +      if (!timeleft || signal_pending(current)) {
 +              ath6kl_err("wmi is not ready or wait was interrupted\n");
 +              status = -EIO;
 +              goto err_htc_stop;
 +      }
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
 +
 +      /* communicate the wmi protocol verision to the target */
 +      if ((ath6kl_set_host_app_area(ar)) != 0)
 +              ath6kl_err("unable to set the host app area\n");
 +
 +      ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
 +                       ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
 +
 +      status = ath6kl_target_config_wlan_params(ar);
 +      if (!status)
 +              goto ath6kl_init_done;
 +
 +err_htc_stop:
 +      ath6kl_htc_stop(ar->htc_target);
 +err_rxbuf_cleanup:
 +      ath6kl_htc_flush_rx_buf(ar->htc_target);
 +      ath6kl_cleanup_amsdu_rxbufs(ar);
 +err_cleanup_scatter:
 +      ath6kl_hif_cleanup_scatter(ar);
 +err_node_cleanup:
 +      wlan_node_table_cleanup(&ar->scan_table);
 +      ath6kl_wmi_shutdown(ar->wmi);
 +      clear_bit(WMI_ENABLED, &ar->flag);
 +      ar->wmi = NULL;
 +
 +ath6kl_init_done:
 +      return status;
 +}
 +
 +/*
 + * Top-level probe-time initialisation: create the driver workqueue,
 + * perform the BMI handshake, read target info, configure the target,
 + * create HTC and the aggregation context, upload firmware, run late
 + * init and finally register the net_device. Returns 0 or a negative
 + * error code; on failure everything set up so far is torn down.
 + */
 +int ath6kl_core_init(struct ath6kl *ar)
 +{
 +      int ret = 0;
 +      struct ath6kl_bmi_target_info targ_info;
 +
 +      ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
 +      if (!ar->ath6kl_wq)
 +              return -ENOMEM;
 +
 +      ret = ath6kl_bmi_init(ar);
 +      if (ret)
 +              goto err_wq;
 +
 +      ret = ath6kl_bmi_get_target_info(ar, &targ_info);
 +      if (ret)
 +              goto err_bmi_cleanup;
 +
 +      /* targ_info fields are little-endian on the wire */
 +      ar->version.target_ver = le32_to_cpu(targ_info.version);
 +      ar->target_type = le32_to_cpu(targ_info.type);
 +      ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
 +
 +      ret = ath6kl_configure_target(ar);
 +      if (ret)
 +              goto err_bmi_cleanup;
 +
 +      ar->htc_target = ath6kl_htc_create(ar);
 +
 +      if (!ar->htc_target) {
 +              ret = -ENOMEM;
 +              goto err_bmi_cleanup;
 +      }
 +
 +      ar->aggr_cntxt = aggr_init(ar->net_dev);
 +      if (!ar->aggr_cntxt) {
 +              ath6kl_err("failed to initialize aggr\n");
 +              ret = -ENOMEM;
 +              goto err_htc_cleanup;
 +      }
 +
 +      ret = ath6kl_init_upload(ar);
 +      if (ret)
 +              goto err_htc_cleanup;
 +
 +      ret = ath6kl_init(ar->net_dev);
 +      if (ret)
 +              goto err_htc_cleanup;
 +
 +      /* This runs the init function if registered */
 +      ret = register_netdev(ar->net_dev);
 +      if (ret) {
 +              ath6kl_err("register_netdev failed\n");
 +              /* ath6kl_destroy() performs the full teardown itself,
 +               * so return directly instead of using the labels below */
 +              ath6kl_destroy(ar->net_dev, 0);
 +              return ret;
 +      }
 +
 +      set_bit(NETDEV_REGISTERED, &ar->flag);
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
 +                      __func__, ar->net_dev->name, ar->net_dev, ar);
 +
 +      return ret;
 +
 +err_htc_cleanup:
 +      ath6kl_htc_cleanup(ar->htc_target);
 +err_bmi_cleanup:
 +      ath6kl_bmi_cleanup(ar);
 +err_wq:
 +      destroy_workqueue(ar->ath6kl_wq);
 +      return ret;
 +}
 +
 +/*
 + * Stop TX/RX traffic as part of device teardown: mark destruction in
 + * progress, take ar->sem and stop the endpoint unless power is already
 + * cut, then clear WLAN_ENABLED.
 + */
 +void ath6kl_stop_txrx(struct ath6kl *ar)
 +{
 +      struct net_device *ndev = ar->net_dev;
 +
 +      if (!ndev)
 +              return;
 +
 +      set_bit(DESTROY_IN_PROGRESS, &ar->flag);
 +
 +      /*
 +       * NOTE(review): ar->sem is acquired here and never released in
 +       * this function - presumably held deliberately for the rest of
 +       * the teardown; confirm against the callers.
 +       */
 +      if (down_interruptible(&ar->sem)) {
 +              ath6kl_err("down_interruptible failed\n");
 +              return;
 +      }
 +
 +      if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
 +              ath6kl_stop_endpoint(ndev, false, true);
 +
 +      clear_bit(WLAN_ENABLED, &ar->flag);
 +}
 +
 +/*
 + * We need to differentiate between the surprise and planned removal of the
 + * device because of the following consideration:
 + *
 + * - In case of surprise removal, the hcd already frees up the pending
 + *   for the device and hence there is no need to unregister the function
 + *   driver inorder to get these requests. For planned removal, the function
 + *   driver has to explicitly unregister itself to have the hcd return all the
 + *   pending requests before the data structures for the devices are freed up.
 + *   Note that as per the current implementation, the function driver will
 + *   end up releasing all the devices since there is no API to selectively
 + *   release a particular device.
 + *
 + * - Certain commands issued to the target can be skipped for surprise
 + *   removal since they will anyway not go through.
 + */
 +/*
 + * Tear down the whole driver instance: workqueue, HTC, aggregation,
 + * cookies, AMSDU buffers, BMI, the net_device (optionally
 + * unregistering it first), cached firmware images and cfg80211 state.
 + */
 +void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
 +{
 +      struct ath6kl *ar;
 +
 +      if (!dev || !ath6kl_priv(dev)) {
 +              ath6kl_err("failed to get device structure\n");
 +              return;
 +      }
 +
 +      ar = ath6kl_priv(dev);
 +
 +      destroy_workqueue(ar->ath6kl_wq);
 +
 +      if (ar->htc_target)
 +              ath6kl_htc_cleanup(ar->htc_target);
 +
 +      aggr_module_destroy(ar->aggr_cntxt);
 +
 +      ath6kl_cookie_cleanup(ar);
 +
 +      ath6kl_cleanup_amsdu_rxbufs(ar);
 +
 +      ath6kl_bmi_cleanup(ar);
 +
 +      if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
 +              unregister_netdev(dev);
 +              clear_bit(NETDEV_REGISTERED, &ar->flag);
 +      }
 +
 +      free_netdev(dev);
 +
 +      /*
 +       * NOTE(review): 'ar' is still dereferenced below after
 +       * free_netdev(dev) - this is only safe if ar is NOT stored in the
 +       * netdev's private area freed above. Verify ar's allocation site.
 +       */
 +      wlan_node_table_cleanup(&ar->scan_table);
 +
 +      kfree(ar->fw_board);
 +      kfree(ar->fw_otp);
 +      kfree(ar->fw);
 +      kfree(ar->fw_patch);
 +
 +      ath6kl_cfg80211_deinit(ar);
 +}
index 34171604cbe409e65f4a8389a5149443b76f5b80,0000000000000000000000000000000000000000..ba89e235905b181efb6841390c1fa506e7ce372f
mode 100644,000000..100644
--- /dev/null
@@@ -1,912 -1,0 +1,913 @@@
 +/*
 + * Copyright (c) 2004-2011 Atheros Communications Inc.
 + *
 + * Permission to use, copy, modify, and/or distribute this software for any
 + * purpose with or without fee is hereby granted, provided that the above
 + * copyright notice and this permission notice appear in all copies.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 + */
 +
 +#include <linux/mmc/card.h>
 +#include <linux/mmc/mmc.h>
 +#include <linux/mmc/host.h>
 +#include <linux/mmc/sdio_func.h>
 +#include <linux/mmc/sdio_ids.h>
 +#include <linux/mmc/sdio.h>
 +#include <linux/mmc/sd.h>
++#include <linux/module.h>
 +#include "htc_hif.h"
 +#include "hif-ops.h"
 +#include "target.h"
 +#include "debug.h"
 +
 +/*
 + * Per-device state of the SDIO HIF backend: the underlying sdio_func,
 + * a fixed pool of bus requests, a DMA bounce buffer, the scatter
 + * request pool and the asynchronous write queue with its worker.
 + */
 +struct ath6kl_sdio {
 +      struct sdio_func *func;
 +
 +      /* protects bus_req_freeq */
 +      spinlock_t lock;
 +
 +      /* free list */
 +      struct list_head bus_req_freeq;
 +
 +      /* available bus requests */
 +      struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
 +
 +      struct ath6kl *ar;
 +      /* bounce buffer for unaligned / non-DMA-able callers */
 +      u8 *dma_buffer;
 +
 +      /* scatter request list head */
 +      struct list_head scat_req;
 +
 +      spinlock_t scat_lock;
 +      bool is_disabled;
 +      atomic_t irq_handling;
 +      const struct sdio_device_id *id;
 +      /* worker draining wr_asyncq */
 +      struct work_struct wr_async_work;
 +      struct list_head wr_asyncq;
 +      spinlock_t wr_async_lock;
 +};
 +
 +/* SD CMD53 argument field values (see ath6kl_sdio_set_cmd53_arg) */
 +#define CMD53_ARG_READ          0
 +#define CMD53_ARG_WRITE         1
 +#define CMD53_ARG_BLOCK_BASIS   1
 +#define CMD53_ARG_FIXED_ADDRESS 0
 +#define CMD53_ARG_INCR_ADDRESS  1
 +
 +/* fetch the SDIO HIF private data stored in ar->hif_priv */
 +static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
 +{
 +      return ar->hif_priv;
 +}
 +
 +/*
 + * Macro to check if DMA buffer is WORD-aligned and DMA-able.
 + * Most host controllers assume the buffer is DMA'able and will
 + * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
 + * check fails on stack memory.
 + *
 + * Returns true when @buf must be copied through the pre-allocated
 + * bounce buffer (ar_sdio->dma_buffer) before the SDIO transfer.
 + */
 +static inline bool buf_needs_bounce(u8 *buf)
 +{
 +      return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
 +}
 +
 +/* Fill ar->mbox_info with the fixed SDIO mailbox address layout. */
 +static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
 +{
 +      struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
 +
 +      /* EP1 has an extended range */
 +      mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
 +      mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
 +      mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
 +      mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
 +      mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
 +      mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
 +}
 +
 +/*
 + * Pack a raw SD CMD53 (IO_RW_EXTENDED) argument word:
 + * bit 31 R/W, bits 30-28 function, bit 27 block mode, bit 26 opcode
 + * (incrementing address), bits 25-9 register address, bits 8-0
 + * block/byte count.
 + */
 +static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
 +                                           u8 mode, u8 opcode, u32 addr,
 +                                           u16 blksz)
 +{
 +      *arg = (((rw & 1) << 31) |
 +              ((func & 0x7) << 28) |
 +              ((mode & 1) << 27) |
 +              ((opcode & 1) << 26) |
 +              ((addr & 0x1FFFF) << 9) |
 +              (blksz & 0x1FF));
 +}
 +
 +/*
 + * Pack a raw SD CMD52 (IO_RW_DIRECT) argument word, hard-coded for
 + * function 0 with the read-after-write flag set (bits 26 and 8).
 + */
 +static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
 +                                           unsigned int address,
 +                                           unsigned char val)
 +{
 +      const u8 func = 0;
 +
 +      *arg = ((write & 1) << 31) |
 +             ((func & 0x7) << 28) |
 +             ((raw & 1) << 27) |
 +             (1 << 26) |
 +             ((address & 0x1FFFF) << 9) |
 +             (1 << 8) |
 +             (val & 0xFF);
 +}
 +
 +/*
 + * Synchronously write one byte to function-0 register @address via a
 + * hand-built CMD52, bypassing the sdio_func claim helpers.
 + */
 +static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
 +                                         unsigned int address,
 +                                         unsigned char byte)
 +{
 +      struct mmc_command io_cmd;
 +
 +      memset(&io_cmd, 0, sizeof(io_cmd));
 +      ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
 +      io_cmd.opcode = SD_IO_RW_DIRECT;
 +      io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
 +
 +      return mmc_wait_for_cmd(card->host, &io_cmd, 0);
 +}
 +
 +/*
 + * Perform a single synchronous SDIO transfer. For writes into the
 + * mailbox range the address is adjusted so the last byte of the
 + * transfer lands on the mailbox end-of-message address. HIF_FIXED_ADDRESS
 + * selects the fixed-address (FIFO) variants. Caller must hold the host.
 + */
 +static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
 +                        u8 *buf, u32 len)
 +{
 +      int ret = 0;
 +
 +      if (request & HIF_WRITE) {
 +              if (addr >= HIF_MBOX_BASE_ADDR &&
 +                  addr <= HIF_MBOX_END_ADDR)
 +                      addr += (HIF_MBOX_WIDTH - len);
 +
 +              if (addr == HIF_MBOX0_EXT_BASE_ADDR)
 +                      addr += HIF_MBOX0_EXT_WIDTH - len;
 +
 +              if (request & HIF_FIXED_ADDRESS)
 +                      ret = sdio_writesb(func, addr, buf, len);
 +              else
 +                      ret = sdio_memcpy_toio(func, addr, buf, len);
 +      } else {
 +              if (request & HIF_FIXED_ADDRESS)
 +                      ret = sdio_readsb(func, buf, addr, len);
 +              else
 +                      ret = sdio_memcpy_fromio(func, buf, addr, len);
 +      }
 +
 +      return ret;
 +}
 +
 +/*
 + * Take one bus request from the free list (ar_sdio->bus_req_freeq)
 + * under ar_sdio->lock, or return NULL when the pool is exhausted.
 + */
 +static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
 +{
 +      struct bus_request *bus_req;
 +      unsigned long flag;
 +
 +      spin_lock_irqsave(&ar_sdio->lock, flag);
 +
 +      if (list_empty(&ar_sdio->bus_req_freeq)) {
 +              spin_unlock_irqrestore(&ar_sdio->lock, flag);
 +              return NULL;
 +      }
 +
 +      bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
 +                                 struct bus_request, list);
 +      list_del(&bus_req->list);
 +
 +      spin_unlock_irqrestore(&ar_sdio->lock, flag);
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
 +
 +      return bus_req;
 +}
 +
 +/* Return @bus_req to the free pool under ar_sdio->lock. */
 +static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
 +                                   struct bus_request *bus_req)
 +{
 +      unsigned long flag;
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
 +
 +      spin_lock_irqsave(&ar_sdio->lock, flag);
 +      list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
 +      spin_unlock_irqrestore(&ar_sdio->lock, flag);
 +}
 +
 +/*
 + * Translate a HIF scatter request into an mmc_data descriptor:
 + * block geometry, transfer direction and the scatterlist built from
 + * scat_req->scat_list. scat_req->len must be block-size aligned.
 + */
 +static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
 +                                      struct mmc_data *data)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      data->blksz = HIF_MBOX_BLOCK_SIZE;
 +      data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
 +
 +      ath6kl_dbg(ATH6KL_DBG_SCATTER,
 +                 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
 +                 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
 +                 data->blksz, data->blocks, scat_req->len,
 +                 scat_req->scat_entries);
 +
 +      data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
 +                                                  MMC_DATA_READ;
 +
 +      /* fill SG entries */
 +      sg = scat_req->sgentries;
 +      sg_init_table(sg, scat_req->scat_entries);
 +
 +      /* assemble SG list */
 +      for (i = 0; i < scat_req->scat_entries; i++, sg++) {
 +              if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
 +                      /*
 +                       * Some scatter engines can handle unaligned
 +                       * buffers, print this as informational only.
 +                       */
 +                      ath6kl_dbg(ATH6KL_DBG_SCATTER,
 +                                 "(%s) scatter buffer is unaligned 0x%p\n",
 +                                 scat_req->req & HIF_WRITE ? "WR" : "RD",
 +                                 scat_req->scat_list[i].buf);
 +
 +              ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
 +                         i, scat_req->scat_list[i].buf,
 +                         scat_req->scat_list[i].len);
 +
 +              sg_set_buf(sg, scat_req->scat_list[i].buf,
 +                         scat_req->scat_list[i].len);
 +      }
 +
 +      /* set scatter-gather table for request */
 +      data->sg = scat_req->sgentries;
 +      data->sg_len = scat_req->scat_entries;
 +}
 +
 +/*
 + * Execute one scatter request synchronously. Virtual-scatter requests
 + * are funnelled through a single bounce-buffer transfer; real scatter
 + * requests are issued as a hand-built CMD53 with a scatterlist. If the
 + * request is marked asynchronous its completion callback is invoked
 + * at the end. Caller must hold the SDIO host.
 + */
 +static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
 +                             struct bus_request *req)
 +{
 +      struct mmc_request mmc_req;
 +      struct mmc_command cmd;
 +      struct mmc_data data;
 +      struct hif_scatter_req *scat_req;
 +      u8 opcode, rw;
 +      int status, len;
 +
 +      scat_req = req->scat_req;
 +
 +      if (scat_req->virt_scat) {
 +              len = scat_req->len;
 +              if (scat_req->req & HIF_BLOCK_BASIS)
 +                      len = round_down(len, HIF_MBOX_BLOCK_SIZE);
 +
 +              status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
 +                                      scat_req->addr, scat_req->virt_dma_buf,
 +                                      len);
 +              goto scat_complete;
 +      }
 +
 +      memset(&mmc_req, 0, sizeof(struct mmc_request));
 +      memset(&cmd, 0, sizeof(struct mmc_command));
 +      memset(&data, 0, sizeof(struct mmc_data));
 +
 +      ath6kl_sdio_setup_scat_data(scat_req, &data);
 +
 +      opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
 +                CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
 +
 +      rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
 +
 +      /* Fixup the address so that the last byte will fall on MBOX EOM */
 +      if (scat_req->req & HIF_WRITE) {
 +              if (scat_req->addr == HIF_MBOX_BASE_ADDR)
 +                      scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
 +              else
 +                      /* Uses extended address range */
 +                      scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
 +      }
 +
 +      /* set command argument */
 +      ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
 +                                CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
 +                                data.blocks);
 +
 +      cmd.opcode = SD_IO_RW_EXTENDED;
 +      cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
 +
 +      mmc_req.cmd = &cmd;
 +      mmc_req.data = &data;
 +
 +      mmc_set_data_timeout(&data, ar_sdio->func->card);
 +      /* synchronous call to process request */
 +      mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
 +
 +      /* command error takes precedence over data error */
 +      status = cmd.error ? cmd.error : data.error;
 +
 +scat_complete:
 +      scat_req->status = status;
 +
 +      /* NOTE(review): message says "write" but is logged for reads too */
 +      if (scat_req->status)
 +              ath6kl_err("Scatter write request failed:%d\n",
 +                         scat_req->status);
 +
 +      if (scat_req->req & HIF_ASYNCHRONOUS)
 +              scat_req->complete(ar_sdio->ar->htc_target, scat_req);
 +
 +      return status;
 +}
 +
 +/*
 + * Pre-allocate @n_scat_req scatter requests, each with room for
 + * @n_scat_entry items, and add them to the HIF scatter pool. With
 + * @virt_scat a cache-aligned bounce buffer is allocated instead of a
 + * scatterlist. Returns 0 or -ENOMEM.
 + *
 + * NOTE(review): on a mid-loop allocation failure the requests already
 + * added to the pool stay there - presumably released by the caller's
 + * error path; confirm.
 + */
 +static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
 +                                         int n_scat_entry, int n_scat_req,
 +                                         bool virt_scat)
 +{
 +      struct hif_scatter_req *s_req;
 +      struct bus_request *bus_req;
 +      int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
 +      u8 *virt_buf;
 +
 +      /* scat_list[1] is already part of hif_scatter_req, hence -1 */
 +      scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
 +      scat_req_sz = sizeof(*s_req) + scat_list_sz;
 +
 +      if (!virt_scat)
 +              sg_sz = sizeof(struct scatterlist) * n_scat_entry;
 +      else
 +              buf_sz =  2 * L1_CACHE_BYTES +
 +                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
 +
 +      for (i = 0; i < n_scat_req; i++) {
 +              /* allocate the scatter request */
 +              s_req = kzalloc(scat_req_sz, GFP_KERNEL);
 +              if (!s_req)
 +                      return -ENOMEM;
 +
 +              if (virt_scat) {
 +                      virt_buf = kzalloc(buf_sz, GFP_KERNEL);
 +                      if (!virt_buf) {
 +                              kfree(s_req);
 +                              return -ENOMEM;
 +                      }
 +
 +                      /* hand out a cache-line-aligned view of the buffer */
 +                      s_req->virt_dma_buf =
 +                              (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
 +              } else {
 +                      /* allocate sglist */
 +                      s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
 +
 +                      if (!s_req->sgentries) {
 +                              kfree(s_req);
 +                              return -ENOMEM;
 +                      }
 +              }
 +
 +              /* allocate a bus request for this scatter request */
 +              bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
 +              if (!bus_req) {
 +                      kfree(s_req->sgentries);
 +                      kfree(s_req->virt_dma_buf);
 +                      kfree(s_req);
 +                      return -ENOMEM;
 +              }
 +
 +              /* assign the scatter request to this bus request */
 +              bus_req->scat_req = s_req;
 +              s_req->busrequest = bus_req;
 +
 +              s_req->virt_scat = virt_scat;
 +
 +              /* add it to the scatter pool */
 +              hif_scatter_req_add(ar_sdio->ar, s_req);
 +      }
 +
 +      return 0;
 +}
 +
 +/*
 + * Synchronous HIF read/write entry point. Unaligned or non-DMA-able
 + * buffers are staged through the single pre-allocated bounce buffer,
 + * copying back after the transfer for reads. Claims and releases the
 + * SDIO host around the transfer.
 + */
 +static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
 +                                     u32 len, u32 request)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      u8  *tbuf = NULL;
 +      int ret;
 +      bool bounced = false;
 +
 +      if (request & HIF_BLOCK_BASIS)
 +              len = round_down(len, HIF_MBOX_BLOCK_SIZE);
 +
 +      if (buf_needs_bounce(buf)) {
 +              if (!ar_sdio->dma_buffer)
 +                      return -ENOMEM;
 +              tbuf = ar_sdio->dma_buffer;
 +              /* NOTE(review): the copy-in happens for reads as well as
 +               * writes; harmless but redundant for the read path. */
 +              memcpy(tbuf, buf, len);
 +              bounced = true;
 +      } else
 +              tbuf = buf;
 +
 +      sdio_claim_host(ar_sdio->func);
 +      ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
 +      if ((request & HIF_READ) && bounced)
 +              memcpy(buf, tbuf, len);
 +      sdio_release_host(ar_sdio->func);
 +
 +      return ret;
 +}
 +
 +/*
 + * Process one queued asynchronous request: scatter requests go through
 + * the scatter path (which invokes their own completion); plain requests
 + * are executed synchronously, returned to the pool and completed via
 + * ath6kldev_rw_comp_handler().
 + */
 +static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
 +                                    struct bus_request *req)
 +{
 +      if (req->scat_req)
 +              ath6kl_sdio_scat_rw(ar_sdio, req);
 +      else {
 +              void *context;
 +              int status;
 +
 +              status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
 +                                                   req->buffer, req->length,
 +                                                   req->request);
 +              /* save the context before the request is recycled */
 +              context = req->packet;
 +              ath6kl_sdio_free_bus_req(ar_sdio, req);
 +              ath6kldev_rw_comp_handler(context, status);
 +      }
 +}
 +
 +/*
 + * Workqueue handler draining wr_asyncq. The spinlock is dropped around
 + * each request so __ath6kl_sdio_write_async() can sleep on the SDIO
 + * transfer; the host is claimed for the whole drain.
 + */
 +static void ath6kl_sdio_write_async_work(struct work_struct *work)
 +{
 +      struct ath6kl_sdio *ar_sdio;
 +      unsigned long flags;
 +      struct bus_request *req, *tmp_req;
 +
 +      ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
 +      sdio_claim_host(ar_sdio->func);
 +
 +      spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
 +      list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
 +              list_del(&req->list);
 +              spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
 +              __ath6kl_sdio_write_async(ar_sdio, req);
 +              spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
 +      }
 +      spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
 +
 +      sdio_release_host(ar_sdio->func);
 +}
 +
 +static void ath6kl_sdio_irq_handler(struct sdio_func *func)
 +{
 +      int status;
 +      struct ath6kl_sdio *ar_sdio;
 +
 +      ar_sdio = sdio_get_drvdata(func);
 +      atomic_set(&ar_sdio->irq_handling, 1);
 +
 +      /*
 +       * Release the host during interrups so we can pick it back up when
 +       * we process commands.
 +       */
 +      sdio_release_host(ar_sdio->func);
 +
 +      status = ath6kldev_intr_bh_handler(ar_sdio->ar);
 +      sdio_claim_host(ar_sdio->func);
 +      atomic_set(&ar_sdio->irq_handling, 0);
 +      WARN_ON(status && status != -ECANCELED);
 +}
 +
 +static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
 +{
 +      struct sdio_func *func = ar_sdio->func;
 +      int ret = 0;
 +
 +      if (!ar_sdio->is_disabled)
 +              return 0;
 +
 +      sdio_claim_host(func);
 +
 +      ret = sdio_enable_func(func);
 +      if (ret) {
 +              ath6kl_err("Unable to enable sdio func: %d)\n", ret);
 +              sdio_release_host(func);
 +              return ret;
 +      }
 +
 +      sdio_release_host(func);
 +
 +      /*
 +       * Wait for hardware to initialise. It should take a lot less than
 +       * 10 ms but let's be conservative here.
 +       */
 +      msleep(10);
 +
 +      ar_sdio->is_disabled = false;
 +
 +      return ret;
 +}
 +
 +static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
 +{
 +      int ret;
 +
 +      if (ar_sdio->is_disabled)
 +              return 0;
 +
 +      /* Disable the card */
 +      sdio_claim_host(ar_sdio->func);
 +      ret = sdio_disable_func(ar_sdio->func);
 +      sdio_release_host(ar_sdio->func);
 +
 +      if (ret)
 +              return ret;
 +
 +      ar_sdio->is_disabled = true;
 +
 +      return ret;
 +}
 +
 +static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
 +                                 u32 length, u32 request,
 +                                 struct htc_packet *packet)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      struct bus_request *bus_req;
 +      unsigned long flags;
 +
 +      bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
 +
 +      if (!bus_req)
 +              return -ENOMEM;
 +
 +      bus_req->address = address;
 +      bus_req->buffer = buffer;
 +      bus_req->length = length;
 +      bus_req->request = request;
 +      bus_req->packet = packet;
 +
 +      spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
 +      list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
 +      spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
 +      queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
 +
 +      return 0;
 +}
 +
 +static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      int ret;
 +
 +      sdio_claim_host(ar_sdio->func);
 +
 +      /* Register the isr */
 +      ret =  sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
 +      if (ret)
 +              ath6kl_err("Failed to claim sdio irq: %d\n", ret);
 +
 +      sdio_release_host(ar_sdio->func);
 +}
 +
 +static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      int ret;
 +
 +      sdio_claim_host(ar_sdio->func);
 +
 +      /* Mask our function IRQ */
 +      while (atomic_read(&ar_sdio->irq_handling)) {
 +              sdio_release_host(ar_sdio->func);
 +              schedule_timeout(HZ / 10);
 +              sdio_claim_host(ar_sdio->func);
 +      }
 +
 +      ret = sdio_release_irq(ar_sdio->func);
 +      if (ret)
 +              ath6kl_err("Failed to release sdio irq: %d\n", ret);
 +
 +      sdio_release_host(ar_sdio->func);
 +}
 +
 +static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      struct hif_scatter_req *node = NULL;
 +      unsigned long flag;
 +
 +      spin_lock_irqsave(&ar_sdio->scat_lock, flag);
 +
 +      if (!list_empty(&ar_sdio->scat_req)) {
 +              node = list_first_entry(&ar_sdio->scat_req,
 +                                      struct hif_scatter_req, list);
 +              list_del(&node->list);
 +      }
 +
 +      spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
 +
 +      return node;
 +}
 +
 +static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
 +                                      struct hif_scatter_req *s_req)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      unsigned long flag;
 +
 +      spin_lock_irqsave(&ar_sdio->scat_lock, flag);
 +
 +      list_add_tail(&s_req->list, &ar_sdio->scat_req);
 +
 +      spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
 +
 +}
 +
 +/* scatter gather read write request */
 +static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
 +                                      struct hif_scatter_req *scat_req)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      u32 request = scat_req->req;
 +      int status = 0;
 +      unsigned long flags;
 +
 +      if (!scat_req->len)
 +              return -EINVAL;
 +
 +      ath6kl_dbg(ATH6KL_DBG_SCATTER,
 +              "hif-scatter: total len: %d scatter entries: %d\n",
 +              scat_req->len, scat_req->scat_entries);
 +
 +      if (request & HIF_SYNCHRONOUS) {
 +              sdio_claim_host(ar_sdio->func);
 +              status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
 +              sdio_release_host(ar_sdio->func);
 +      } else {
 +              spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
 +              list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
 +              spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
 +              queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
 +      }
 +
 +      return status;
 +}
 +
 +/* clean up scatter support */
 +static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      struct hif_scatter_req *s_req, *tmp_req;
 +      unsigned long flag;
 +
 +      /* empty the free list */
 +      spin_lock_irqsave(&ar_sdio->scat_lock, flag);
 +      list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
 +              list_del(&s_req->list);
 +              spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
 +
 +              if (s_req->busrequest)
 +                      ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
 +              kfree(s_req->virt_dma_buf);
 +              kfree(s_req->sgentries);
 +              kfree(s_req);
 +
 +              spin_lock_irqsave(&ar_sdio->scat_lock, flag);
 +      }
 +      spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
 +}
 +
 +/* setup of HIF scatter resources */
 +static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
 +{
 +      struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 +      struct htc_target *target = ar->htc_target;
 +      int ret;
 +      bool virt_scat = false;
 +
 +      /* check if host supports scatter and it meets our requirements */
 +      if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
 +              ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
 +                         ar_sdio->func->card->host->max_segs,
 +                         MAX_SCATTER_ENTRIES_PER_REQ);
 +              virt_scat = true;
 +      }
 +
 +      if (!virt_scat) {
 +              ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
 +                              MAX_SCATTER_ENTRIES_PER_REQ,
 +                              MAX_SCATTER_REQUESTS, virt_scat);
 +
 +              if (!ret) {
 +                      ath6kl_dbg(ATH6KL_DBG_ANY,
 +                                 "hif-scatter enabled: max scatter req : %d entries: %d\n",
 +                                 MAX_SCATTER_REQUESTS,
 +                                 MAX_SCATTER_ENTRIES_PER_REQ);
 +
 +                      target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
 +                      target->max_xfer_szper_scatreq =
 +                                              MAX_SCATTER_REQ_TRANSFER_SIZE;
 +              } else {
 +                      ath6kl_sdio_cleanup_scatter(ar);
 +                      ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
 +              }
 +      }
 +
 +      if (virt_scat || ret) {
 +              ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
 +                              ATH6KL_SCATTER_ENTRIES_PER_REQ,
 +                              ATH6KL_SCATTER_REQS, virt_scat);
 +
 +              if (ret) {
 +                      ath6kl_err("failed to alloc virtual scatter resources !\n");
 +                      ath6kl_sdio_cleanup_scatter(ar);
 +                      return ret;
 +              }
 +
 +              ath6kl_dbg(ATH6KL_DBG_ANY,
 +                         "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
 +                         ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
 +
 +              target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
 +              target->max_xfer_szper_scatreq =
 +                                      ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
 +      }
 +
 +      return 0;
 +}
 +
 +static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
 +      .read_write_sync = ath6kl_sdio_read_write_sync,
 +      .write_async = ath6kl_sdio_write_async,
 +      .irq_enable = ath6kl_sdio_irq_enable,
 +      .irq_disable = ath6kl_sdio_irq_disable,
 +      .scatter_req_get = ath6kl_sdio_scatter_req_get,
 +      .scatter_req_add = ath6kl_sdio_scatter_req_add,
 +      .enable_scatter = ath6kl_sdio_enable_scatter,
 +      .scat_req_rw = ath6kl_sdio_async_rw_scatter,
 +      .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
 +};
 +
 +static int ath6kl_sdio_probe(struct sdio_func *func,
 +                           const struct sdio_device_id *id)
 +{
 +      int ret;
 +      struct ath6kl_sdio *ar_sdio;
 +      struct ath6kl *ar;
 +      int count;
 +
 +      ath6kl_dbg(ATH6KL_DBG_TRC,
 +                 "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
 +                 __func__, func->num, func->vendor,
 +                 func->device, func->max_blksize, func->cur_blksize);
 +
 +      ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
 +      if (!ar_sdio)
 +              return -ENOMEM;
 +
 +      ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
 +      if (!ar_sdio->dma_buffer) {
 +              ret = -ENOMEM;
 +              goto err_hif;
 +      }
 +
 +      ar_sdio->func = func;
 +      sdio_set_drvdata(func, ar_sdio);
 +
 +      ar_sdio->id = id;
 +      ar_sdio->is_disabled = true;
 +
 +      spin_lock_init(&ar_sdio->lock);
 +      spin_lock_init(&ar_sdio->scat_lock);
 +      spin_lock_init(&ar_sdio->wr_async_lock);
 +
 +      INIT_LIST_HEAD(&ar_sdio->scat_req);
 +      INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
 +      INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
 +
 +      INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
 +
 +      for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
 +              ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
 +
 +      ar = ath6kl_core_alloc(&ar_sdio->func->dev);
 +      if (!ar) {
 +              ath6kl_err("Failed to alloc ath6kl core\n");
 +              ret = -ENOMEM;
 +              goto err_dma;
 +      }
 +
 +      ar_sdio->ar = ar;
 +      ar->hif_priv = ar_sdio;
 +      ar->hif_ops = &ath6kl_sdio_ops;
 +
 +      ath6kl_sdio_set_mbox_info(ar);
 +
 +      sdio_claim_host(func);
 +
 +      if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
 +          MANUFACTURER_ID_AR6003_BASE) {
 +              /* enable 4-bit ASYNC interrupt on AR6003 or later */
 +              ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
 +                                              CCCR_SDIO_IRQ_MODE_REG,
 +                                              SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
 +              if (ret) {
 +                      ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
 +                                 ret);
 +                      sdio_release_host(func);
 +                      goto err_dma;
 +              }
 +
 +              ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
 +      }
 +
 +      /* give us some time to enable, in ms */
 +      func->enable_timeout = 100;
 +
 +      sdio_release_host(func);
 +
 +      ret = ath6kl_sdio_power_on(ar_sdio);
 +      if (ret)
 +              goto err_dma;
 +
 +      sdio_claim_host(func);
 +
 +      ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
 +      if (ret) {
 +              ath6kl_err("Set sdio block size %d failed: %d)\n",
 +                         HIF_MBOX_BLOCK_SIZE, ret);
 +              sdio_release_host(func);
 +              goto err_off;
 +      }
 +
 +      sdio_release_host(func);
 +
 +      ret = ath6kl_core_init(ar);
 +      if (ret) {
 +              ath6kl_err("Failed to init ath6kl core\n");
 +              goto err_off;
 +      }
 +
 +      return ret;
 +
 +err_off:
 +      ath6kl_sdio_power_off(ar_sdio);
 +err_dma:
 +      kfree(ar_sdio->dma_buffer);
 +err_hif:
 +      kfree(ar_sdio);
 +
 +      return ret;
 +}
 +
 +static void ath6kl_sdio_remove(struct sdio_func *func)
 +{
 +      struct ath6kl_sdio *ar_sdio;
 +
 +      ar_sdio = sdio_get_drvdata(func);
 +
 +      ath6kl_stop_txrx(ar_sdio->ar);
 +      cancel_work_sync(&ar_sdio->wr_async_work);
 +
 +      ath6kl_unavail_ev(ar_sdio->ar);
 +
 +      ath6kl_sdio_power_off(ar_sdio);
 +
 +      kfree(ar_sdio->dma_buffer);
 +      kfree(ar_sdio);
 +}
 +
 +static const struct sdio_device_id ath6kl_sdio_devices[] = {
 +      {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
 +      {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
 +      {},
 +};
 +
 +MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
 +
 +static struct sdio_driver ath6kl_sdio_driver = {
 +      .name = "ath6kl_sdio",
 +      .id_table = ath6kl_sdio_devices,
 +      .probe = ath6kl_sdio_probe,
 +      .remove = ath6kl_sdio_remove,
 +};
 +
 +static int __init ath6kl_sdio_init(void)
 +{
 +      int ret;
 +
 +      ret = sdio_register_driver(&ath6kl_sdio_driver);
 +      if (ret)
 +              ath6kl_err("sdio driver registration failed: %d\n", ret);
 +
 +      return ret;
 +}
 +
 +static void __exit ath6kl_sdio_exit(void)
 +{
 +      sdio_unregister_driver(&ath6kl_sdio_driver);
 +}
 +
 +module_init(ath6kl_sdio_init);
 +module_exit(ath6kl_sdio_exit);
 +
 +MODULE_AUTHOR("Atheros Communications, Inc.");
 +MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
 +MODULE_LICENSE("Dual BSD/GPL");
 +
 +MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
 +MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
 +MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
 +MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
 +MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
 +MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
 +MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
 +MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
 +MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
 +MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1d7bb7423f94b2ebfec2b854dff8eed3d6879f7e,95172aad82c549eac767f0dc799680560a2237c1..5d37172acc88d462307afac964ae290786d9a172
   *****************************************************************************/
  #include <linux/pci.h>
  #include <linux/pci-aspm.h>
+ #include <linux/module.h>
  
  #include "iwl-bus.h"
 -#include "iwl-agn.h"
 -#include "iwl-core.h"
  #include "iwl-io.h"
 +#include "iwl-shared.h"
 +#include "iwl-trans.h"
 +#include "iwl-csr.h"
 +#include "iwl-cfg.h"
  
  /* PCI registers */
  #define PCI_CFG_RETRY_TIMEOUT 0x041
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index b2f897acb23857836aee851f83b154e9e5a0e548,5fa73852cb661b73274bd8e86e718a5ff92c80ba..e69aba920295db718968e9dbe1553c76fc2d5e68
   * Larry Finger <Larry.Finger@lwfinger.net>
   *****************************************************************************/
  
++#include <linux/moduleparam.h>
++
  #include "wifi.h"
  
 +static unsigned int debug = DBG_EMERG;
 +module_param(debug, uint, 0);
 +MODULE_PARM_DESC(debug, "Set global debug level for rtlwifi (0,2-5)");
 +
  void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
  {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
Simple merge
Simple merge
Simple merge
index 5b0f1ff8036133c50a4427dfa90e08ec9d54cde7,0000000000000000000000000000000000000000..06c3642e5bdb0f1525c6bde54bf2c73058e1b26e
mode 100644,000000..100644
--- /dev/null
@@@ -1,342 -1,0 +1,343 @@@
 +/*
 + *  Texas Instrument's NFC Driver For Shared Transport.
 + *
 + *  NFC Driver acts as interface between NCI core and
 + *  TI Shared Transport Layer.
 + *
 + *  Copyright (C) 2011 Texas Instruments, Inc.
 + *
 + *  Written by Ilan Elias <ilane@ti.com>
 + *
 + *  Acknowledgements:
 + *  This file is based on btwilink.c, which was written
 + *  by Raja Mani and Pavan Savoy.
 + *
 + *  This program is free software; you can redistribute it and/or modify
 + *  it under the terms of the GNU General Public License version 2 as
 + *  published by the Free Software Foundation.
 + *
 + *  This program is distributed in the hope that it will be useful,
 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + *  GNU General Public License for more details.
 + *
 + *  You should have received a copy of the GNU General Public License
 + *  along with this program; if not, write to the Free Software
 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + *
 + */
 +#include <linux/platform_device.h>
++#include <linux/module.h>
 +#include <linux/nfc.h>
 +#include <net/nfc/nci.h>
 +#include <net/nfc/nci_core.h>
 +#include <linux/ti_wilink_st.h>
 +
 +#define NFCWILINK_CHNL                        12
 +#define NFCWILINK_OPCODE              7
 +#define NFCWILINK_MAX_FRAME_SIZE      300
 +#define NFCWILINK_HDR_LEN             4
 +#define NFCWILINK_OFFSET_LEN_IN_HDR   1
 +#define NFCWILINK_LEN_SIZE            2
 +#define NFCWILINK_REGISTER_TIMEOUT    8000    /* 8 sec */
 +
 +struct nfcwilink_hdr {
 +      u8 chnl;
 +      u8 opcode;
 +      u16 len;
 +} __packed;
 +
 +struct nfcwilink {
 +      struct platform_device          *pdev;
 +      struct nci_dev                  *ndev;
 +      unsigned long                   flags;
 +
 +      char                            st_register_cb_status;
 +      long                            (*st_write) (struct sk_buff *);
 +      struct completion               st_register_completed;
 +};
 +
 +/* NFCWILINK driver flags */
 +enum {
 +      NFCWILINK_RUNNING,
 +};
 +
 +/* Called by ST when registration is complete */
 +static void nfcwilink_register_complete(void *priv_data, char data)
 +{
 +      struct nfcwilink *drv = priv_data;
 +
 +      nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
 +
 +      /* store ST registration status */
 +      drv->st_register_cb_status = data;
 +
 +      /* complete the wait in nfc_st_open() */
 +      complete(&drv->st_register_completed);
 +}
 +
 +/* Called by ST when receive data is available */
 +static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
 +{
 +      struct nfcwilink *drv = priv_data;
 +      int rc;
 +
 +      nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
 +
 +      if (!skb)
 +              return -EFAULT;
 +
 +      if (!drv) {
 +              kfree_skb(skb);
 +              return -EFAULT;
 +      }
 +
 +      /* strip the ST header
 +      (apart for the chnl byte, which is not received in the hdr) */
 +      skb_pull(skb, (NFCWILINK_HDR_LEN-1));
 +
 +      skb->dev = (void *) drv->ndev;
 +
 +      /* Forward skb to NCI core layer */
 +      rc = nci_recv_frame(skb);
 +      if (rc < 0) {
 +              nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
 +              return rc;
 +      }
 +
 +      return 0;
 +}
 +
 +/* protocol structure registered with ST */
 +static struct st_proto_s nfcwilink_proto = {
 +      .chnl_id = NFCWILINK_CHNL,
 +      .max_frame_size = NFCWILINK_MAX_FRAME_SIZE,
 +      .hdr_len = (NFCWILINK_HDR_LEN-1),       /* not including chnl byte */
 +      .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR,
 +      .len_size = NFCWILINK_LEN_SIZE,
 +      .reserve = 0,
 +      .recv = nfcwilink_receive,
 +      .reg_complete_cb = nfcwilink_register_complete,
 +      .write = NULL,
 +};
 +
 +static int nfcwilink_open(struct nci_dev *ndev)
 +{
 +      struct nfcwilink *drv = nci_get_drvdata(ndev);
 +      unsigned long comp_ret;
 +      int rc;
 +
 +      nfc_dev_dbg(&drv->pdev->dev, "open entry");
 +
 +      if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
 +              rc = -EBUSY;
 +              goto exit;
 +      }
 +
 +      nfcwilink_proto.priv_data = drv;
 +
 +      init_completion(&drv->st_register_completed);
 +      drv->st_register_cb_status = -EINPROGRESS;
 +
 +      rc = st_register(&nfcwilink_proto);
 +      if (rc < 0) {
 +              if (rc == -EINPROGRESS) {
 +                      comp_ret = wait_for_completion_timeout(
 +                      &drv->st_register_completed,
 +                      msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
 +
 +                      nfc_dev_dbg(&drv->pdev->dev,
 +                      "wait_for_completion_timeout returned %ld",
 +                      comp_ret);
 +
 +                      if (comp_ret == 0) {
 +                              /* timeout */
 +                              rc = -ETIMEDOUT;
 +                              goto clear_exit;
 +                      } else if (drv->st_register_cb_status != 0) {
 +                              rc = drv->st_register_cb_status;
 +                              nfc_dev_err(&drv->pdev->dev,
 +                              "st_register_cb failed %d", rc);
 +                              goto clear_exit;
 +                      }
 +              } else {
 +                      nfc_dev_err(&drv->pdev->dev,
 +                              "st_register failed %d", rc);
 +                      goto clear_exit;
 +              }
 +      }
 +
 +      /* st_register MUST fill the write callback */
 +      BUG_ON(nfcwilink_proto.write == NULL);
 +      drv->st_write = nfcwilink_proto.write;
 +
 +      goto exit;
 +
 +clear_exit:
 +      clear_bit(NFCWILINK_RUNNING, &drv->flags);
 +
 +exit:
 +      return rc;
 +}
 +
 +static int nfcwilink_close(struct nci_dev *ndev)
 +{
 +      struct nfcwilink *drv = nci_get_drvdata(ndev);
 +      int rc;
 +
 +      nfc_dev_dbg(&drv->pdev->dev, "close entry");
 +
 +      if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
 +              return 0;
 +
 +      rc = st_unregister(&nfcwilink_proto);
 +      if (rc)
 +              nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
 +
 +      drv->st_write = NULL;
 +
 +      return rc;
 +}
 +
 +static int nfcwilink_send(struct sk_buff *skb)
 +{
 +      struct nci_dev *ndev = (struct nci_dev *)skb->dev;
 +      struct nfcwilink *drv = nci_get_drvdata(ndev);
 +      struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
 +      long len;
 +
 +      nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
 +
 +      if (!test_bit(NFCWILINK_RUNNING, &drv->flags))
 +              return -EBUSY;
 +
 +      /* add the ST hdr to the start of the buffer */
 +      hdr.len = skb->len;
 +      memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN);
 +
 +      /* Insert skb to shared transport layer's transmit queue.
 +       * Freeing skb memory is taken care in shared transport layer,
 +       * so don't free skb memory here.
 +       */
 +      len = drv->st_write(skb);
 +      if (len < 0) {
 +              kfree_skb(skb);
 +              nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
 +              return -EFAULT;
 +      }
 +
 +      return 0;
 +}
 +
 +static struct nci_ops nfcwilink_ops = {
 +      .open = nfcwilink_open,
 +      .close = nfcwilink_close,
 +      .send = nfcwilink_send,
 +};
 +
 +static int nfcwilink_probe(struct platform_device *pdev)
 +{
 +      static struct nfcwilink *drv;
 +      int rc;
 +      u32 protocols;
 +
 +      nfc_dev_dbg(&pdev->dev, "probe entry");
 +
 +      drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
 +      if (!drv) {
 +              rc = -ENOMEM;
 +              goto exit;
 +      }
 +
 +      drv->pdev = pdev;
 +
 +      protocols = NFC_PROTO_JEWEL_MASK
 +                      | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
 +                      | NFC_PROTO_ISO14443_MASK
 +                      | NFC_PROTO_NFC_DEP_MASK;
 +
 +      drv->ndev = nci_allocate_device(&nfcwilink_ops,
 +                                      protocols,
 +                                      NFCWILINK_HDR_LEN,
 +                                      0);
 +      if (!drv->ndev) {
 +              nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
 +              rc = -ENOMEM;
 +              goto free_exit;
 +      }
 +
 +      nci_set_parent_dev(drv->ndev, &pdev->dev);
 +      nci_set_drvdata(drv->ndev, drv);
 +
 +      rc = nci_register_device(drv->ndev);
 +      if (rc < 0) {
 +              nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
 +              goto free_dev_exit;
 +      }
 +
 +      dev_set_drvdata(&pdev->dev, drv);
 +
 +      goto exit;
 +
 +free_dev_exit:
 +      nci_free_device(drv->ndev);
 +
 +free_exit:
 +      kfree(drv);
 +
 +exit:
 +      return rc;
 +}
 +
 +static int nfcwilink_remove(struct platform_device *pdev)
 +{
 +      struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
 +      struct nci_dev *ndev;
 +
 +      nfc_dev_dbg(&pdev->dev, "remove entry");
 +
 +      if (!drv)
 +              return -EFAULT;
 +
 +      ndev = drv->ndev;
 +
 +      nci_unregister_device(ndev);
 +      nci_free_device(ndev);
 +
 +      kfree(drv);
 +
 +      dev_set_drvdata(&pdev->dev, NULL);
 +
 +      return 0;
 +}
 +
 +static struct platform_driver nfcwilink_driver = {
 +      .probe = nfcwilink_probe,
 +      .remove = nfcwilink_remove,
 +      .driver = {
 +              .name = "nfcwilink",
 +              .owner = THIS_MODULE,
 +      },
 +};
 +
 +/* ------- Module Init/Exit interfaces ------ */
 +static int __init nfcwilink_init(void)
 +{
 +      printk(KERN_INFO "NFC Driver for TI WiLink");
 +
 +      return platform_driver_register(&nfcwilink_driver);
 +}
 +
 +static void __exit nfcwilink_exit(void)
 +{
 +      platform_driver_unregister(&nfcwilink_driver);
 +}
 +
 +module_init(nfcwilink_init);
 +module_exit(nfcwilink_exit);
 +
 +/* ------ Module Info ------ */
 +
 +MODULE_AUTHOR("Ilan Elias <ilane@ti.com>");
 +MODULE_DESCRIPTION("NFC Driver for TI Shared Transport");
 +MODULE_LICENSE("GPL");
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index d291a54acfad5f94ec127cfdbff6e37d5b8cf85d,b95cbdccc11a506128c5e8c237f3c7121fbed86e..85f4a9a5d12e5d00a7e445bd1e1e0a784eaf7cd0
  #define KMSG_COMPONENT "vmur"
  #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  
 -#include <linux/kernel_stat.h>
  #include <linux/cdev.h>
  #include <linux/slab.h>
+ #include <linux/module.h>
  
  #include <asm/uaccess.h>
  #include <asm/cio.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 628f347404f9b355e1fbe14bd58fdae3f0d7171d,f4e4885f71b79bbae3ca927e53326bde6b450717..31018a8465ab840020f0518a3a7fec8bf5d36ed7
@@@ -88,7 -88,7 +88,8 @@@
   */
  
  #include <linux/timer.h>
 +#include <linux/delay.h>
+ #include <linux/module.h>
  #include <linux/slab.h>
  #include <asm/unaligned.h>
  
Simple merge
index e1aa17840c5bf2e0a2d41784114c3f41cdd5aeb1,04ad8dd1a74cf8267982e29a551efd1503a5ef1c..bb8f49269a68dfc307ca178e1effd95fd72ef3ee
@@@ -11,6 -11,6 +11,7 @@@
  #include <linux/scatterlist.h>
  #include <linux/blkdev.h>
  #include <linux/slab.h>
++#include <linux/export.h>
  
  #include "sas_internal.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index c1647ebafb5e311fe7bfa660536e918436ce3bed,69e23c3ae56458b6327e62ac684b0bd06759d80b..5f723040f9a897ce748a78ad6c465235861b3a6c
  #include <linux/sched.h>
  #include <linux/mmc/sdio.h>
  #include <linux/mmc/sdio_func.h>
 +#include <linux/mmc/card.h>
  #include <linux/semaphore.h>
  #include <linux/firmware.h>
+ #include <linux/module.h>
  #include <asm/unaligned.h>
  #include <defs.h>
  #include <brcmu_wifi.h>
Simple merge
Simple merge
index 4b433c53e5c5e890b1dc0b4d0e8ea2d5cffa6627,a84083caa7c487e2857c2b75353bae19854c042a..a41e29a92ff666b66668d9dcd4d58520d47626f7
@@@ -1,5 -1,8 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index f448258884c5abbdee74214b327f59b7f48eae72,5a4123707cd5040cb1e4722d48f36e08133f0120..bce505e716d05f04f26a12a52f9fc9e5a4017593
@@@ -1,8 -1,14 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16201.h"
  
index 993e239f9426bf15bfb18621bc43ebbac01f496c,ac6460bbe48cb645953383f1a70c432a3bad6af1..aa793a051762592f5a25355bdaecc49906021850
@@@ -1,5 -1,10 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 50165f9ddc52afe92d17590072a60b15ece48062,4d5d4a1b29ce06e60d92af6179f31c345c0b4952..24bcb8e15c55d708e1735b3e2aff7af06ecfc95e
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16203.h"
  
index 847f43858f50d89a458b1f81f0cab9e49a8db984,bf4da926a727baf7be15ed741c79ba9e8b0fb018..b58260c90857c6f5e298eb16119331b2b145cd03
@@@ -1,5 -1,10 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 55b661c98d2db6ccde9e2f65d27e0d56898c95d4,daf5186592d0062649230ba892641c07084eef06..6e542af02c095eebb4281d374dae7f8d10f0a11f
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16204.h"
  
index f889fe72f33af94f3394c3cc5d90054aa7c7eee7,131a7b6dd60af9f546dc7e1317596e980d439e68..42a8a541bedc148e9436fee110dba84b3017cc82
@@@ -1,5 -1,10 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 8df8a9791d5ea10bb92f077c3b4a9dc69904212b,03e7b65efecc44a4fb02023fe2de271e165edb63..c5d82c1a55d96f408ebb0797bb788a750ad47aa5
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16209.h"
  
index 051ba643d3403d6ea984616a58ea67efaa5ff11e,d261647d9050adf150dadb47ce4f3d6ac088c16a..2913bdaae52640ddee9d232d3dd3176843d2b872
@@@ -1,5 -1,10 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 13f1d142eea3ed8bef37519fa87449e96d0c24f8,1be0de3308ce5986300bab4f280f046267fa5f53..8e0ce568e64c29cf9048ae5044c85ef1c6b96072
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16240.h"
  
index 5c6fe13408b1c80e6116e1ddeaf9a83c8a370ab8,ce905bb663e1b92887db17d2016a16c4b1b5a9ee..75e89278f338946100b7c5919cfbf9fda8c750cb
@@@ -1,15 -1,20 +1,16 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
  #include <linux/gpio.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
 -#include <linux/sysfs.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../ring_sw.h"
  #include "../kfifo_buf.h"
 -#include "accel.h"
  #include "../trigger.h"
 +#include "../trigger_consumer.h"
  #include "lis3l02dq.h"
  
  /**
index ab6cf9cb5a7fb9fdf849d38b79f31ebbcbea02c1,0000000000000000000000000000000000000000..82e7829ad942fa6cc9c136c1f45cc2b0b0396ec7
mode 100644,000000..100644
--- /dev/null
@@@ -1,992 -1,0 +1,994 @@@
 +/*
 + * AD7280A Lithium Ion Battery Monitoring System
 + *
 + * Copyright 2011 Analog Devices Inc.
 + *
 + * Licensed under the GPL-2.
 + */
 +
 +#include <linux/device.h>
 +#include <linux/kernel.h>
++#include <linux/stat.h>
 +#include <linux/slab.h>
 +#include <linux/sysfs.h>
 +#include <linux/spi/spi.h>
 +#include <linux/err.h>
 +#include <linux/delay.h>
 +#include <linux/interrupt.h>
++#include <linux/module.h>
 +
 +#include "../iio.h"
 +#include "../sysfs.h"
 +
 +#include "ad7280a.h"
 +
 +/* Registers */
 +#define AD7280A_CELL_VOLTAGE_1                0x0  /* D11 to D0, Read only */
 +#define AD7280A_CELL_VOLTAGE_2                0x1  /* D11 to D0, Read only */
 +#define AD7280A_CELL_VOLTAGE_3                0x2  /* D11 to D0, Read only */
 +#define AD7280A_CELL_VOLTAGE_4                0x3  /* D11 to D0, Read only */
 +#define AD7280A_CELL_VOLTAGE_5                0x4  /* D11 to D0, Read only */
 +#define AD7280A_CELL_VOLTAGE_6                0x5  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_1             0x6  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_2             0x7  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_3             0x8  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_4             0x9  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_5             0xA  /* D11 to D0, Read only */
 +#define AD7280A_AUX_ADC_6             0xB  /* D11 to D0, Read only */
 +#define AD7280A_SELF_TEST             0xC  /* D11 to D0, Read only */
 +#define AD7280A_CONTROL_HB            0xD  /* D15 to D8, Read/write */
 +#define AD7280A_CONTROL_LB            0xE  /* D7 to D0, Read/write */
 +#define AD7280A_CELL_OVERVOLTAGE      0xF  /* D7 to D0, Read/write */
 +#define AD7280A_CELL_UNDERVOLTAGE     0x10 /* D7 to D0, Read/write */
 +#define AD7280A_AUX_ADC_OVERVOLTAGE   0x11 /* D7 to D0, Read/write */
 +#define AD7280A_AUX_ADC_UNDERVOLTAGE  0x12 /* D7 to D0, Read/write */
 +#define AD7280A_ALERT                 0x13 /* D7 to D0, Read/write */
 +#define AD7280A_CELL_BALANCE          0x14 /* D7 to D0, Read/write */
 +#define AD7280A_CB1_TIMER             0x15 /* D7 to D0, Read/write */
 +#define AD7280A_CB2_TIMER             0x16 /* D7 to D0, Read/write */
 +#define AD7280A_CB3_TIMER             0x17 /* D7 to D0, Read/write */
 +#define AD7280A_CB4_TIMER             0x18 /* D7 to D0, Read/write */
 +#define AD7280A_CB5_TIMER             0x19 /* D7 to D0, Read/write */
 +#define AD7280A_CB6_TIMER             0x1A /* D7 to D0, Read/write */
 +#define AD7280A_PD_TIMER              0x1B /* D7 to D0, Read/write */
 +#define AD7280A_READ                  0x1C /* D7 to D0, Read/write */
 +#define AD7280A_CNVST_CONTROL         0x1D /* D7 to D0, Read/write */
 +
 +/* Bits and Masks */
 +#define AD7280A_CTRL_HB_CONV_INPUT_ALL                        (0 << 6)
 +#define AD7280A_CTRL_HB_CONV_INPUT_6CELL_AUX1_3_4     (1 << 6)
 +#define AD7280A_CTRL_HB_CONV_INPUT_6CELL              (2 << 6)
 +#define AD7280A_CTRL_HB_CONV_INPUT_SELF_TEST          (3 << 6)
 +#define AD7280A_CTRL_HB_CONV_RES_READ_ALL             (0 << 4)
 +#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL_AUX1_3_4  (1 << 4)
 +#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL           (2 << 4)
 +#define AD7280A_CTRL_HB_CONV_RES_READ_NO              (3 << 4)
 +#define AD7280A_CTRL_HB_CONV_START_CNVST              (0 << 3)
 +#define AD7280A_CTRL_HB_CONV_START_CS                 (1 << 3)
 +#define AD7280A_CTRL_HB_CONV_AVG_DIS                  (0 << 1)
 +#define AD7280A_CTRL_HB_CONV_AVG_2                    (1 << 1)
 +#define AD7280A_CTRL_HB_CONV_AVG_4                    (2 << 1)
 +#define AD7280A_CTRL_HB_CONV_AVG_8                    (3 << 1)
 +#define AD7280A_CTRL_HB_CONV_AVG(x)                   ((x) << 1)
 +#define AD7280A_CTRL_HB_PWRDN_SW                      (1 << 0)
 +
 +#define AD7280A_CTRL_LB_SWRST                         (1 << 7)
 +#define AD7280A_CTRL_LB_ACQ_TIME_400ns                        (0 << 5)
 +#define AD7280A_CTRL_LB_ACQ_TIME_800ns                        (1 << 5)
 +#define AD7280A_CTRL_LB_ACQ_TIME_1200ns                       (2 << 5)
 +#define AD7280A_CTRL_LB_ACQ_TIME_1600ns                       (3 << 5)
 +#define AD7280A_CTRL_LB_ACQ_TIME(x)                   ((x) << 5)
 +#define AD7280A_CTRL_LB_MUST_SET                      (1 << 4)
 +#define AD7280A_CTRL_LB_THERMISTOR_EN                 (1 << 3)
 +#define AD7280A_CTRL_LB_LOCK_DEV_ADDR                 (1 << 2)
 +#define AD7280A_CTRL_LB_INC_DEV_ADDR                  (1 << 1)
 +#define AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN             (1 << 0)
 +
 +#define AD7280A_ALERT_GEN_STATIC_HIGH                 (1 << 6)
 +#define AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN            (3 << 6)
 +
 +#define AD7280A_ALL_CELLS                             (0xAD << 16)
 +
 +#define AD7280A_MAX_SPI_CLK_Hz                700000 /* < 1MHz */
 +#define AD7280A_MAX_CHAIN             8
 +#define AD7280A_CELLS_PER_DEV         6
 +#define AD7280A_BITS                  12
 +#define AD7280A_NUM_CH                        (AD7280A_AUX_ADC_6 - \
 +                                      AD7280A_CELL_VOLTAGE_1 + 1)
 +
 +#define AD7280A_DEVADDR_MASTER                0
 +#define AD7280A_DEVADDR_ALL           0x1F
 +/* 5-bit device address is sent LSB first */
 +#define AD7280A_DEVADDR(addr) (((addr & 0x1) << 4) | ((addr & 0x2) << 3) | \
 +                              (addr & 0x4) | ((addr & 0x8) >> 3) | \
 +                              ((addr & 0x10) >> 4))
 +
 +/* During a read a valid write is mandatory.
 + * So writing to the highest available address (Address 0x1F)
 + * and setting the address all parts bit to 0 is recommended
 + * So the TXVAL is AD7280A_DEVADDR_ALL + CRC
 + */
 +#define AD7280A_READ_TXVAL    0xF800030A
 +
 +/*
 + * AD7280 CRC
 + *
 + * P(x) = x^8 + x^5 + x^3 + x^2 + x^1 + x^0 = 0b100101111 => 0x2F
 + */
 +#define POLYNOM               0x2F
 +#define POLYNOM_ORDER 8
 +#define HIGHBIT               1 << (POLYNOM_ORDER - 1);
 +
 +struct ad7280_state {
 +      struct spi_device               *spi;
 +      struct iio_chan_spec            *channels;
 +      struct iio_dev_attr             *iio_attr;
 +      int                             slave_num;
 +      int                             scan_cnt;
 +      int                             readback_delay_us;
 +      unsigned char                   crc_tab[256];
 +      unsigned char                   ctrl_hb;
 +      unsigned char                   ctrl_lb;
 +      unsigned char                   cell_threshhigh;
 +      unsigned char                   cell_threshlow;
 +      unsigned char                   aux_threshhigh;
 +      unsigned char                   aux_threshlow;
 +      unsigned char                   cb_mask[AD7280A_MAX_CHAIN];
 +};
 +
 +static void ad7280_crc8_build_table(unsigned char *crc_tab)
 +{
 +      unsigned char bit, crc;
 +      int cnt, i;
 +
 +      for (cnt = 0; cnt < 256; cnt++) {
 +              crc = cnt;
 +              for (i = 0; i < 8; i++) {
 +                      bit = crc & HIGHBIT;
 +                      crc <<= 1;
 +                      if (bit)
 +                              crc ^= POLYNOM;
 +              }
 +              crc_tab[cnt] = crc;
 +      }
 +}
 +
 +static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
 +{
 +      unsigned char crc;
 +
 +      crc = crc_tab[val >> 16 & 0xFF];
 +      crc = crc_tab[crc ^ (val >> 8 & 0xFF)];
 +
 +      return  crc ^ (val & 0xFF);
 +}
 +
 +static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
 +{
 +      unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
 +
 +      if (crc != ((val >> 2) & 0xFF))
 +              return -EIO;
 +
 +      return 0;
 +}
 +
 +/* After initiating a conversion sequence we need to wait until the
 + * conversion is done. The delay is typically in the range of 15..30 us
 + * however depending an the number of devices in the daisy chain and the
 + * number of averages taken, conversion delays and acquisition time options
 + * it may take up to 250us, in this case we better sleep instead of busy
 + * wait.
 + */
 +
 +static void ad7280_delay(struct ad7280_state *st)
 +{
 +      if (st->readback_delay_us < 50)
 +              udelay(st->readback_delay_us);
 +      else
 +              msleep(1);
 +}
 +
 +static int __ad7280_read32(struct spi_device *spi, unsigned *val)
 +{
 +      unsigned rx_buf, tx_buf = cpu_to_be32(AD7280A_READ_TXVAL);
 +      int ret;
 +
 +      struct spi_transfer t = {
 +              .tx_buf = &tx_buf,
 +              .rx_buf = &rx_buf,
 +              .len = 4,
 +      };
 +      struct spi_message m;
 +
 +      spi_message_init(&m);
 +      spi_message_add_tail(&t, &m);
 +
 +      ret = spi_sync(spi, &m);
 +      if (ret)
 +              return ret;
 +
 +      *val = be32_to_cpu(rx_buf);
 +
 +      return 0;
 +}
 +
 +static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
 +                      unsigned addr, bool all, unsigned val)
 +{
 +      unsigned reg = (devaddr << 27 | addr << 21 |
 +                      (val & 0xFF) << 13 | all << 12);
 +
 +      reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
 +      reg = cpu_to_be32(reg);
 +
 +      return spi_write(st->spi, &reg, 4);
 +}
 +
 +static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
 +                      unsigned addr)
 +{
 +      int ret;
 +      unsigned tmp;
 +
 +      /* turns off the read operation on all parts */
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
 +                      AD7280A_CTRL_HB_CONV_INPUT_ALL |
 +                      AD7280A_CTRL_HB_CONV_RES_READ_NO |
 +                      st->ctrl_hb);
 +      if (ret)
 +              return ret;
 +
 +      /* turns on the read operation on the addressed part */
 +      ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
 +                      AD7280A_CTRL_HB_CONV_INPUT_ALL |
 +                      AD7280A_CTRL_HB_CONV_RES_READ_ALL |
 +                      st->ctrl_hb);
 +      if (ret)
 +              return ret;
 +
 +      /* Set register address on the part to be read from */
 +      ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
 +      if (ret)
 +              return ret;
 +
 +      __ad7280_read32(st->spi, &tmp);
 +
 +      if (ad7280_check_crc(st, tmp))
 +              return -EIO;
 +
 +      if (((tmp >> 27) != devaddr) || (((tmp >> 21) & 0x3F) != addr))
 +              return -EFAULT;
 +
 +      return (tmp >> 13) & 0xFF;
 +}
 +
 +static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
 +                             unsigned addr)
 +{
 +      int ret;
 +      unsigned tmp;
 +
 +      ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
 +      if (ret)
 +              return ret;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
 +                      AD7280A_CTRL_HB_CONV_INPUT_ALL |
 +                      AD7280A_CTRL_HB_CONV_RES_READ_NO |
 +                      st->ctrl_hb);
 +      if (ret)
 +              return ret;
 +
 +      ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
 +                      AD7280A_CTRL_HB_CONV_INPUT_ALL |
 +                      AD7280A_CTRL_HB_CONV_RES_READ_ALL |
 +                      AD7280A_CTRL_HB_CONV_START_CS |
 +                      st->ctrl_hb);
 +      if (ret)
 +              return ret;
 +
 +      ad7280_delay(st);
 +
 +      __ad7280_read32(st->spi, &tmp);
 +
 +      if (ad7280_check_crc(st, tmp))
 +              return -EIO;
 +
 +      if (((tmp >> 27) != devaddr) || (((tmp >> 23) & 0xF) != addr))
 +              return -EFAULT;
 +
 +      return (tmp >> 11) & 0xFFF;
 +}
 +
 +static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
 +                           unsigned *array)
 +{
 +      int i, ret;
 +      unsigned tmp, sum = 0;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
 +                         AD7280A_CELL_VOLTAGE_1 << 2);
 +      if (ret)
 +              return ret;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
 +                      AD7280A_CTRL_HB_CONV_INPUT_ALL |
 +                      AD7280A_CTRL_HB_CONV_RES_READ_ALL |
 +                      AD7280A_CTRL_HB_CONV_START_CS |
 +                      st->ctrl_hb);
 +      if (ret)
 +              return ret;
 +
 +      ad7280_delay(st);
 +
 +      for (i = 0; i < cnt; i++) {
 +              __ad7280_read32(st->spi, &tmp);
 +
 +              if (ad7280_check_crc(st, tmp))
 +                      return -EIO;
 +
 +              if (array)
 +                      array[i] = tmp;
 +              /* only sum cell voltages */
 +              if (((tmp >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6)
 +                      sum += ((tmp >> 11) & 0xFFF);
 +      }
 +
 +      return sum;
 +}
 +
 +static int ad7280_chain_setup(struct ad7280_state *st)
 +{
 +      unsigned val, n;
 +      int ret;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
 +                      AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
 +                      AD7280A_CTRL_LB_LOCK_DEV_ADDR |
 +                      AD7280A_CTRL_LB_MUST_SET |
 +                      AD7280A_CTRL_LB_SWRST |
 +                      st->ctrl_lb);
 +      if (ret)
 +              return ret;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
 +                      AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
 +                      AD7280A_CTRL_LB_LOCK_DEV_ADDR |
 +                      AD7280A_CTRL_LB_MUST_SET |
 +                      st->ctrl_lb);
 +      if (ret)
 +              return ret;
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
 +                      AD7280A_CONTROL_LB << 2);
 +      if (ret)
 +              return ret;
 +
 +      for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
 +              __ad7280_read32(st->spi, &val);
 +              if (val == 0)
 +                      return n - 1;
 +
 +              if (ad7280_check_crc(st, val))
 +                      return -EIO;
 +
 +              if (n != AD7280A_DEVADDR(val >> 27))
 +                      return -EIO;
 +      }
 +
 +      return -EFAULT;
 +}
 +
 +static ssize_t ad7280_show_balance_sw(struct device *dev,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +
 +      return sprintf(buf, "%d\n",
 +                     !!(st->cb_mask[this_attr->address >> 8] &
 +                     (1 << ((this_attr->address & 0xFF) + 2))));
 +}
 +
 +static ssize_t ad7280_store_balance_sw(struct device *dev,
 +                                       struct device_attribute *attr,
 +                                       const char *buf,
 +                                       size_t len)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      bool readin;
 +      int ret;
 +      unsigned devaddr, ch;
 +
 +      ret = strtobool(buf, &readin);
 +      if (ret)
 +              return ret;
 +
 +      devaddr = this_attr->address >> 8;
 +      ch = this_attr->address & 0xFF;
 +
 +      mutex_lock(&dev_info->mlock);
 +      if (readin)
 +              st->cb_mask[devaddr] |= 1 << (ch + 2);
 +      else
 +              st->cb_mask[devaddr] &= ~(1 << (ch + 2));
 +
 +      ret = ad7280_write(st, devaddr, AD7280A_CELL_BALANCE,
 +                         0, st->cb_mask[devaddr]);
 +      mutex_unlock(&dev_info->mlock);
 +
 +      return ret ? ret : len;
 +}
 +
 +static ssize_t ad7280_show_balance_timer(struct device *dev,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      int ret;
 +      unsigned msecs;
 +
 +      mutex_lock(&dev_info->mlock);
 +      ret = ad7280_read(st, this_attr->address >> 8,
 +                      this_attr->address & 0xFF);
 +      mutex_unlock(&dev_info->mlock);
 +
 +      if (ret < 0)
 +              return ret;
 +
 +      msecs = (ret >> 3) * 71500;
 +
 +      return sprintf(buf, "%d\n", msecs);
 +}
 +
 +static ssize_t ad7280_store_balance_timer(struct device *dev,
 +                                       struct device_attribute *attr,
 +                                       const char *buf,
 +                                       size_t len)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      long val;
 +      int ret;
 +
 +      ret = strict_strtoul(buf, 10, &val);
 +      if (ret)
 +              return ret;
 +
 +      val /= 71500;
 +
 +      if (val > 31)
 +              return -EINVAL;
 +
 +      mutex_lock(&dev_info->mlock);
 +      ret = ad7280_write(st, this_attr->address >> 8,
 +                         this_attr->address & 0xFF,
 +                         0, (val & 0x1F) << 3);
 +      mutex_unlock(&dev_info->mlock);
 +
 +      return ret ? ret : len;
 +}
 +
 +static struct attribute *ad7280_attributes[AD7280A_MAX_CHAIN *
 +                                         AD7280A_CELLS_PER_DEV * 2 + 1];
 +
 +static struct attribute_group ad7280_attrs_group = {
 +      .attrs = ad7280_attributes,
 +};
 +
 +static int ad7280_channel_init(struct ad7280_state *st)
 +{
 +      int dev, ch, cnt;
 +
 +      st->channels = kzalloc(sizeof(*st->channels) *
 +                              ((st->slave_num + 1) * 12 + 2), GFP_KERNEL);
 +      if (st->channels == NULL)
 +              return -ENOMEM;
 +
 +      for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
 +              for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6; ch++,
 +                      cnt++) {
 +                      if (ch < AD7280A_AUX_ADC_1) {
 +                              st->channels[cnt].type = IIO_IN_DIFF;
 +                              st->channels[cnt].channel = (dev * 6) + ch;
 +                              st->channels[cnt].channel2 =
 +                                      st->channels[cnt].channel + 1;
 +                      } else {
 +                              st->channels[cnt].type = IIO_TEMP;
 +                              st->channels[cnt].channel = (dev * 6) + ch - 6;
 +                      }
 +                      st->channels[cnt].indexed = 1;
 +                      st->channels[cnt].info_mask =
 +                              (1 << IIO_CHAN_INFO_SCALE_SHARED);
 +                      st->channels[cnt].address =
 +                              AD7280A_DEVADDR(dev) << 8 | ch;
 +                      st->channels[cnt].scan_index = cnt;
 +                      st->channels[cnt].scan_type.sign = 'u';
 +                      st->channels[cnt].scan_type.realbits = 12;
 +                      st->channels[cnt].scan_type.storagebits = 32;
 +                      st->channels[cnt].scan_type.shift = 0;
 +              }
 +
 +      st->channels[cnt].type = IIO_IN_DIFF;
 +      st->channels[cnt].channel = 0;
 +      st->channels[cnt].channel2 = dev * 6;
 +      st->channels[cnt].address = AD7280A_ALL_CELLS;
 +      st->channels[cnt].indexed = 1;
 +      st->channels[cnt].info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED);
 +      st->channels[cnt].scan_index = cnt;
 +      st->channels[cnt].scan_type.sign = 'u';
 +      st->channels[cnt].scan_type.realbits = 32;
 +      st->channels[cnt].scan_type.storagebits = 32;
 +      st->channels[cnt].scan_type.shift = 0;
 +      cnt++;
 +      st->channels[cnt].type = IIO_TIMESTAMP;
 +      st->channels[cnt].channel = -1;
 +      st->channels[cnt].scan_index = cnt;
 +      st->channels[cnt].scan_type.sign = 's';
 +      st->channels[cnt].scan_type.realbits = 64;
 +      st->channels[cnt].scan_type.storagebits = 64;
 +      st->channels[cnt].scan_type.shift = 0;
 +
 +      return cnt + 1;
 +}
 +
 +static int ad7280_attr_init(struct ad7280_state *st)
 +{
 +      int dev, ch, cnt;
 +
 +      st->iio_attr = kzalloc(sizeof(*st->iio_attr) * (st->slave_num + 1) *
 +                              AD7280A_CELLS_PER_DEV * 2, GFP_KERNEL);
 +      if (st->iio_attr == NULL)
 +              return -ENOMEM;
 +
 +      for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
 +              for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6;
 +                      ch++, cnt++) {
 +                      st->iio_attr[cnt].address =
 +                              AD7280A_DEVADDR(dev) << 8 | ch;
 +                      st->iio_attr[cnt].dev_attr.attr.mode =
 +                              S_IWUSR | S_IRUGO;
 +                      st->iio_attr[cnt].dev_attr.show =
 +                              ad7280_show_balance_sw;
 +                      st->iio_attr[cnt].dev_attr.store =
 +                              ad7280_store_balance_sw;
 +                      st->iio_attr[cnt].dev_attr.attr.name =
 +                              kasprintf(GFP_KERNEL,
 +                                      "in%d-in%d_balance_switch_en",
 +                                      (dev * AD7280A_CELLS_PER_DEV) + ch,
 +                                      (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
 +                      ad7280_attributes[cnt] =
 +                              &st->iio_attr[cnt].dev_attr.attr;
 +                      cnt++;
 +                      st->iio_attr[cnt].address =
 +                              AD7280A_DEVADDR(dev) << 8 |
 +                              (AD7280A_CB1_TIMER + ch);
 +                      st->iio_attr[cnt].dev_attr.attr.mode =
 +                              S_IWUSR | S_IRUGO;
 +                      st->iio_attr[cnt].dev_attr.show =
 +                              ad7280_show_balance_timer;
 +                      st->iio_attr[cnt].dev_attr.store =
 +                              ad7280_store_balance_timer;
 +                      st->iio_attr[cnt].dev_attr.attr.name =
 +                              kasprintf(GFP_KERNEL, "in%d-in%d_balance_timer",
 +                                      (dev * AD7280A_CELLS_PER_DEV) + ch,
 +                                      (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
 +                      ad7280_attributes[cnt] =
 +                              &st->iio_attr[cnt].dev_attr.attr;
 +              }
 +
 +      ad7280_attributes[cnt] = NULL;
 +
 +      return 0;
 +}
 +
 +static ssize_t ad7280_read_channel_config(struct device *dev,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      unsigned val;
 +
 +      switch (this_attr->address) {
 +      case AD7280A_CELL_OVERVOLTAGE:
 +              val = 1000 + (st->cell_threshhigh * 1568) / 100;
 +              break;
 +      case AD7280A_CELL_UNDERVOLTAGE:
 +              val = 1000 + (st->cell_threshlow * 1568) / 100;
 +              break;
 +      case AD7280A_AUX_ADC_OVERVOLTAGE:
 +              val = (st->aux_threshhigh * 196) / 10;
 +              break;
 +      case AD7280A_AUX_ADC_UNDERVOLTAGE:
 +              val = (st->aux_threshlow * 196) / 10;
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return sprintf(buf, "%d\n", val);
 +}
 +
 +static ssize_t ad7280_write_channel_config(struct device *dev,
 +                                       struct device_attribute *attr,
 +                                       const char *buf,
 +                                       size_t len)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +
 +      long val;
 +      int ret;
 +
 +      ret = strict_strtol(buf, 10, &val);
 +      if (ret)
 +              return ret;
 +
 +      switch (this_attr->address) {
 +      case AD7280A_CELL_OVERVOLTAGE:
 +      case AD7280A_CELL_UNDERVOLTAGE:
 +              val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
 +              break;
 +      case AD7280A_AUX_ADC_OVERVOLTAGE:
 +      case AD7280A_AUX_ADC_UNDERVOLTAGE:
 +              val = (val * 10) / 196; /* LSB 19.6mV */
 +              break;
 +      default:
 +              return -EFAULT;
 +      }
 +
 +      val = clamp(val, 0L, 0xFFL);
 +
 +      mutex_lock(&dev_info->mlock);
 +      switch (this_attr->address) {
 +      case AD7280A_CELL_OVERVOLTAGE:
 +              st->cell_threshhigh = val;
 +              break;
 +      case AD7280A_CELL_UNDERVOLTAGE:
 +              st->cell_threshlow = val;
 +              break;
 +      case AD7280A_AUX_ADC_OVERVOLTAGE:
 +              st->aux_threshhigh = val;
 +              break;
 +      case AD7280A_AUX_ADC_UNDERVOLTAGE:
 +              st->aux_threshlow = val;
 +              break;
 +      }
 +
 +      ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
 +                         this_attr->address, 1, val);
 +
 +      mutex_unlock(&dev_info->mlock);
 +
 +      return ret ? ret : len;
 +}
 +
 +static irqreturn_t ad7280_event_handler(int irq, void *private)
 +{
 +      struct iio_dev *dev_info = private;
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      unsigned *channels;
 +      int i, ret;
 +
 +      channels = kzalloc(sizeof(*channels) * st->scan_cnt, GFP_KERNEL);
 +      if (channels == NULL)
 +              return IRQ_HANDLED;
 +
 +      ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
 +      if (ret < 0)
 +              return IRQ_HANDLED;
 +
 +      for (i = 0; i < st->scan_cnt; i++) {
 +              if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
 +                      if (((channels[i] >> 11) & 0xFFF) >=
 +                              st->cell_threshhigh)
 +                              iio_push_event(dev_info, 0,
 +                                      IIO_UNMOD_EVENT_CODE(IIO_IN_DIFF,
 +                                      0,
 +                                      IIO_EV_TYPE_THRESH,
 +                                      IIO_EV_DIR_RISING),
 +                                      iio_get_time_ns());
 +                      else if (((channels[i] >> 11) & 0xFFF) <=
 +                              st->cell_threshlow)
 +                              iio_push_event(dev_info, 0,
 +                                      IIO_UNMOD_EVENT_CODE(IIO_IN_DIFF,
 +                                      0,
 +                                      IIO_EV_TYPE_THRESH,
 +                                      IIO_EV_DIR_FALLING),
 +                                      iio_get_time_ns());
 +              } else {
 +                      if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
 +                              iio_push_event(dev_info, 0,
 +                                      IIO_UNMOD_EVENT_CODE(IIO_TEMP,
 +                                      0,
 +                                      IIO_EV_TYPE_THRESH,
 +                                      IIO_EV_DIR_RISING),
 +                                      iio_get_time_ns());
 +                      else if (((channels[i] >> 11) & 0xFFF) <=
 +                              st->aux_threshlow)
 +                              iio_push_event(dev_info, 0,
 +                                      IIO_UNMOD_EVENT_CODE(IIO_TEMP,
 +                                      0,
 +                                      IIO_EV_TYPE_THRESH,
 +                                      IIO_EV_DIR_FALLING),
 +                                      iio_get_time_ns());
 +              }
 +      }
 +
 +      kfree(channels);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/*
 + * Event (threshold) sysfs attributes: cell under-/over-voltage and aux
 + * ADC under-/over-voltage limits.  All four share the
 + * ad7280_{read,write}_channel_config helpers; the last macro argument
 + * is the AD7280A register they target.
 + */
 +static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
 +              in-in_thresh_low_value,
 +              S_IRUGO | S_IWUSR,
 +              ad7280_read_channel_config,
 +              ad7280_write_channel_config,
 +              AD7280A_CELL_UNDERVOLTAGE);
 +
 +static IIO_DEVICE_ATTR_NAMED(in_thresh_high_value,
 +              in-in_thresh_high_value,
 +              S_IRUGO | S_IWUSR,
 +              ad7280_read_channel_config,
 +              ad7280_write_channel_config,
 +              AD7280A_CELL_OVERVOLTAGE);
 +
 +static IIO_DEVICE_ATTR(temp_thresh_low_value,
 +              S_IRUGO | S_IWUSR,
 +              ad7280_read_channel_config,
 +              ad7280_write_channel_config,
 +              AD7280A_AUX_ADC_UNDERVOLTAGE);
 +
 +static IIO_DEVICE_ATTR(temp_thresh_high_value,
 +              S_IRUGO | S_IWUSR,
 +              ad7280_read_channel_config,
 +              ad7280_write_channel_config,
 +              AD7280A_AUX_ADC_OVERVOLTAGE);
 +
 +
 +static struct attribute *ad7280_event_attributes[] = {
 +      &iio_dev_attr_in_thresh_low_value.dev_attr.attr,
 +      &iio_dev_attr_in_thresh_high_value.dev_attr.attr,
 +      &iio_dev_attr_temp_thresh_low_value.dev_attr.attr,
 +      &iio_dev_attr_temp_thresh_high_value.dev_attr.attr,
 +      NULL,
 +};
 +
 +static struct attribute_group ad7280_event_attrs_group = {
 +      .attrs = ad7280_event_attributes,
 +};
 +
 +/*
 + * Raw/scale accessor for a single channel.
 + * m == 0: run one conversion and return the raw code (IIO_VAL_INT);
 + * the special AD7280A_ALL_CELLS address triggers a full chain read.
 + * m == scale: report the LSB size in mV + uV; the arithmetic below
 + * spans 4 V over AD7280A_BITS for cell inputs, 5 V for aux inputs.
 + */
 +static int ad7280_read_raw(struct iio_dev *dev_info,
 +                         struct iio_chan_spec const *chan,
 +                         int *val,
 +                         int *val2,
 +                         long m)
 +{
 +      struct ad7280_state *st = iio_priv(dev_info);
 +      unsigned int scale_uv;
 +      int ret;
 +
 +      switch (m) {
 +      case 0:
 +              /* serialize bus/chain access against other readers */
 +              mutex_lock(&dev_info->mlock);
 +              if (chan->address == AD7280A_ALL_CELLS)
 +                      ret = ad7280_read_all_channels(st, st->scan_cnt, NULL);
 +              else
 +                      /* address encodes device in [15:8], channel in [7:0] */
 +                      ret = ad7280_read_channel(st, chan->address >> 8,
 +                                                chan->address & 0xFF);
 +              mutex_unlock(&dev_info->mlock);
 +
 +              if (ret < 0)
 +                      return ret;
 +
 +              *val = ret;
 +
 +              return IIO_VAL_INT;
 +      case (1 << IIO_CHAN_INFO_SCALE_SHARED):
 +              if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
 +                      scale_uv = (4000 * 1000) >> AD7280A_BITS;
 +              else
 +                      scale_uv = (5000 * 1000) >> AD7280A_BITS;
 +
 +              *val =  scale_uv / 1000;
 +              *val2 = (scale_uv % 1000) * 1000;
 +              return IIO_VAL_INT_PLUS_MICRO;
 +      }
 +      return -EINVAL;
 +}
 +
 +/* IIO core hooks: raw reads, one ALERT interrupt line, plus the
 + * device and event attribute groups declared above. */
 +static const struct iio_info ad7280_info = {
 +      .read_raw = &ad7280_read_raw,
 +      .num_interrupt_lines = 1,
 +      .event_attrs = &ad7280_event_attrs_group,
 +      .attrs = &ad7280_attrs_group,
 +      .driver_module = THIS_MODULE,
 +};
 +
 +/* Fallback configuration when no platform data is supplied.
 + * NOTE(review): the "ad7793" name looks copied from the ad7793 driver;
 + * consider renaming to ad7280_default_pdata. */
 +static const struct ad7280_platform_data ad7793_default_pdata = {
 +      .acquisition_time = AD7280A_ACQ_TIME_400ns,
 +      .conversion_averaging = AD7280A_CONV_AVG_DIS,
 +      .thermistor_term_en = true,
 +};
 +
 +/*
 + * Probe: allocate the IIO device, configure the SPI link, enumerate
 + * the daisy-chain, derive the readback delay from the acquisition and
 + * averaging settings, then register channels/attributes and (if an IRQ
 + * is wired) arm the ALERT interrupt.
 + */
 +static int __devinit ad7280_probe(struct spi_device *spi)
 +{
 +      const struct ad7280_platform_data *pdata = spi->dev.platform_data;
 +      struct ad7280_state *st;
 +      int ret, regdone = 0;
 +      /* acquisition time (ns) and averaging factor, indexed by pdata */
 +      const unsigned short tACQ_ns[4] = {465, 1010, 1460, 1890};
 +      const unsigned short nAVG[4] = {1, 2, 4, 8};
 +      struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
 +
 +      if (indio_dev == NULL)
 +              return -ENOMEM;
 +
 +      st = iio_priv(indio_dev);
 +      spi_set_drvdata(spi, indio_dev);
 +      st->spi = spi;
 +
 +      if (!pdata)
 +              pdata = &ad7793_default_pdata;
 +
 +      ad7280_crc8_build_table(st->crc_tab);
 +
 +      st->spi->max_speed_hz = AD7280A_MAX_SPI_CLK_Hz;
 +      st->spi->mode = SPI_MODE_1;
 +      spi_setup(st->spi);
 +
 +      st->ctrl_lb = AD7280A_CTRL_LB_ACQ_TIME(pdata->acquisition_time & 0x3);
 +      /* NOTE(review): an LB-named thermistor bit is ORed into ctrl_hb;
 +       * verify against the AD7280A register map. */
 +      st->ctrl_hb = AD7280A_CTRL_HB_CONV_AVG(pdata->conversion_averaging
 +                      & 0x3) | (pdata->thermistor_term_en ?
 +                      AD7280A_CTRL_LB_THERMISTOR_EN : 0);
 +
 +      ret = ad7280_chain_setup(st);
 +      if (ret < 0)
 +              goto error_free_device;
 +
 +      /* ad7280_chain_setup() returns the detected slave count */
 +      st->slave_num = ret;
 +      st->scan_cnt = (st->slave_num + 1) * AD7280A_NUM_CH;
 +      st->cell_threshhigh = 0xFF;
 +      st->aux_threshhigh = 0xFF;
 +
 +      /*
 +       * Total Conversion Time = ((tACQ + tCONV) *
 +       *                         (Number of Conversions per Part)) -
 +       *                         tACQ + ((N - 1) * tDELAY)
 +       *
 +       * Readback Delay = Total Conversion Time + tWAIT
 +       */
 +
 +      st->readback_delay_us =
 +              ((tACQ_ns[pdata->acquisition_time & 0x3] + 695) *
 +              (AD7280A_NUM_CH * nAVG[pdata->conversion_averaging & 0x3]))
 +              - tACQ_ns[pdata->acquisition_time & 0x3] +
 +              st->slave_num * 250; /* tDELAY = 250 ns per extra device */
 +
 +      /* Convert to usecs */
 +      st->readback_delay_us = DIV_ROUND_UP(st->readback_delay_us, 1000);
 +      st->readback_delay_us += 5; /* Add tWAIT */
 +
 +      indio_dev->name = spi_get_device_id(spi)->name;
 +      indio_dev->dev.parent = &spi->dev;
 +      indio_dev->modes = INDIO_DIRECT_MODE;
 +
 +      ret = ad7280_channel_init(st);
 +      if (ret < 0)
 +              goto error_free_device;
 +
 +      indio_dev->num_channels = ret;
 +      indio_dev->channels = st->channels;
 +      indio_dev->info = &ad7280_info;
 +
 +      ret = ad7280_attr_init(st);
 +      if (ret < 0)
 +              goto error_free_channels;
 +
 +      ret = iio_device_register(indio_dev);
 +      if (ret)
 +              goto error_free_attr;
 +      regdone = 1;
 +
 +      if (spi->irq > 0) {
 +              ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
 +                                 AD7280A_ALERT, 1,
 +                                 AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN);
 +              if (ret)
 +                      goto error_free_attr;
 +
 +              ret = ad7280_write(st, AD7280A_DEVADDR(st->slave_num),
 +                                 AD7280A_ALERT, 0,
 +                                 AD7280A_ALERT_GEN_STATIC_HIGH |
 +                                 (pdata->chain_last_alert_ignore & 0xF));
 +              if (ret)
 +                      goto error_free_attr;
 +
 +              ret = request_threaded_irq(spi->irq,
 +                                         NULL,
 +                                         ad7280_event_handler,
 +                                         IRQF_TRIGGER_FALLING |
 +                                         IRQF_ONESHOT,
 +                                         indio_dev->name,
 +                                         indio_dev);
 +              if (ret)
 +                      goto error_free_attr;
 +      }
 +
 +      return 0;
 +
 +error_free_attr:
 +      kfree(st->iio_attr);
 +
 +error_free_channels:
 +      kfree(st->channels);
 +
 +error_free_device:
 +      /* regdone: once registered, the IIO core owns the device */
 +      if (regdone)
 +              iio_device_unregister(indio_dev);
 +      else
 +              iio_free_device(indio_dev);
 +
 +      return ret;
 +}
 +
 +static int __devexit ad7280_remove(struct spi_device *spi)
 +{
 +      struct iio_dev *indio_dev = spi_get_drvdata(spi);
 +      struct ad7280_state *st = iio_priv(indio_dev);
 +
 +      if (spi->irq > 0)
 +              free_irq(spi->irq, indio_dev);
 +
 +      /* put the whole chain into software power-down */
 +      ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
 +                      AD7280A_CTRL_HB_PWRDN_SW | st->ctrl_hb);
 +
 +      /* NOTE(review): channels/attrs are freed before
 +       * iio_device_unregister(); sysfs could still reference them in
 +       * that window - consider unregistering first. */
 +      kfree(st->channels);
 +      kfree(st->iio_attr);
 +      iio_device_unregister(indio_dev);
 +
 +      return 0;
 +}
 +
 +static const struct spi_device_id ad7280_id[] = {
 +      {"ad7280a", 0},
 +      {}
 +};
 +
 +static struct spi_driver ad7280_driver = {
 +      .driver = {
 +              .name   = "ad7280",
 +              .bus    = &spi_bus_type,
 +              .owner  = THIS_MODULE,
 +      },
 +      .probe          = ad7280_probe,
 +      .remove         = __devexit_p(ad7280_remove),
 +      .id_table       = ad7280_id,
 +};
 +
 +/* Standard SPI driver registration boilerplate. */
 +static int __init ad7280_init(void)
 +{
 +      return spi_register_driver(&ad7280_driver);
 +}
 +module_init(ad7280_init);
 +
 +static void __exit ad7280_exit(void)
 +{
 +      spi_unregister_driver(&ad7280_driver);
 +}
 +module_exit(ad7280_exit);
 +
 +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 +MODULE_DESCRIPTION("Analog Devices AD7280A");
 +MODULE_LICENSE("GPL v2");
Simple merge
index 10f8a666226b47b5f2408a36a5878718572324c2,469d6b208929f5b38f531122e969962679d8d37a..c1b00a470ad5c16b1bdb83a288e9be464c20c3ac
@@@ -1,5 -1,10 +1,6 @@@
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
  #include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 2f2b2160f44d73b1812e929461f87bd761fd4a85,8f57a6b823f78704cd78cc1f39a2b088b44ebf7d..8299cd18d705f2b32445858942080377852c445e
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16260.h"
  
index acc0e69cb58874a05e02521e5a2f7e96aea0ae8b,0000000000000000000000000000000000000000..ab08cd5f7c305f664e16e8ba956c92a9493e7d65
mode 100644,000000..100644
--- /dev/null
@@@ -1,818 -1,0 +1,820 @@@
 +/*
 + * AD5933 AD5934 Impedance Converter, Network Analyzer
 + *
 + * Copyright 2011 Analog Devices Inc.
 + *
 + * Licensed under the GPL-2.
 + */
 +
 +#include <linux/interrupt.h>
 +#include <linux/device.h>
 +#include <linux/kernel.h>
 +#include <linux/sysfs.h>
++#include <linux/stat.h>
 +#include <linux/i2c.h>
 +#include <linux/regulator/consumer.h>
 +#include <linux/slab.h>
 +#include <linux/types.h>
 +#include <linux/err.h>
 +#include <linux/delay.h>
++#include <linux/module.h>
 +#include <asm/div64.h>
 +
 +#include "../iio.h"
 +#include "../sysfs.h"
 +#include "../ring_generic.h"
 +#include "../ring_sw.h"
 +
 +#include "ad5933.h"
 +
 +/* AD5933/AD5934 Registers */
 +#define AD5933_REG_CONTROL_HB         0x80    /* R/W, 2 bytes */
 +#define AD5933_REG_CONTROL_LB         0x81    /* R/W, 2 bytes */
 +#define AD5933_REG_FREQ_START         0x82    /* R/W, 3 bytes */
 +#define AD5933_REG_FREQ_INC           0x85    /* R/W, 3 bytes */
 +#define AD5933_REG_INC_NUM            0x88    /* R/W, 2 bytes, 9 bit */
 +#define AD5933_REG_SETTLING_CYCLES    0x8A    /* R/W, 2 bytes */
 +#define AD5933_REG_STATUS             0x8F    /* R, 1 byte */
 +#define AD5933_REG_TEMP_DATA          0x92    /* R, 2 bytes*/
 +#define AD5933_REG_REAL_DATA          0x94    /* R, 2 bytes*/
 +#define AD5933_REG_IMAG_DATA          0x96    /* R, 2 bytes*/
 +
 +/* AD5933_REG_CONTROL_HB Bits */
 +#define AD5933_CTRL_INIT_START_FREQ   (0x1 << 4)
 +#define AD5933_CTRL_START_SWEEP               (0x2 << 4)
 +#define AD5933_CTRL_INC_FREQ          (0x3 << 4)
 +#define AD5933_CTRL_REPEAT_FREQ               (0x4 << 4)
 +#define AD5933_CTRL_MEASURE_TEMP      (0x9 << 4)
 +#define AD5933_CTRL_POWER_DOWN                (0xA << 4)
 +#define AD5933_CTRL_STANDBY           (0xB << 4)
 +
 +#define AD5933_CTRL_RANGE_2000mVpp    (0x0 << 1)
 +#define AD5933_CTRL_RANGE_200mVpp     (0x1 << 1)
 +#define AD5933_CTRL_RANGE_400mVpp     (0x2 << 1)
 +#define AD5933_CTRL_RANGE_1000mVpp    (0x3 << 1)
 +#define AD5933_CTRL_RANGE(x)          ((x) << 1)
 +
 +#define AD5933_CTRL_PGA_GAIN_1                (0x1 << 0)
 +#define AD5933_CTRL_PGA_GAIN_5                (0x0 << 0)
 +
 +/* AD5933_REG_CONTROL_LB Bits */
 +#define AD5933_CTRL_RESET             (0x1 << 4)
 +#define AD5933_CTRL_INT_SYSCLK                (0x0 << 3)
 +#define AD5933_CTRL_EXT_SYSCLK                (0x1 << 3)
 +
 +/* AD5933_REG_STATUS Bits */
 +#define AD5933_STAT_TEMP_VALID                (0x1 << 0)
 +#define AD5933_STAT_DATA_VALID                (0x1 << 1)
 +#define AD5933_STAT_SWEEP_DONE                (0x1 << 2)
 +
 +/* I2C Block Commands */
 +#define AD5933_I2C_BLOCK_WRITE                0xA0
 +#define AD5933_I2C_BLOCK_READ         0xA1
 +#define AD5933_I2C_ADDR_POINTER               0xB0
 +
 +/* Device Specs */
 +#define AD5933_INT_OSC_FREQ_Hz                16776000
 +#define AD5933_MAX_OUTPUT_FREQ_Hz     100000
 +#define AD5933_MAX_RETRIES            100
 +
 +#define AD5933_OUT_RANGE              1
 +#define AD5933_OUT_RANGE_AVAIL                2
 +#define AD5933_OUT_SETTLING_CYCLES    3
 +#define AD5933_IN_PGA_GAIN            4
 +#define AD5933_IN_PGA_GAIN_AVAIL      5
 +#define AD5933_FREQ_POINTS            6
 +
 +#define AD5933_POLL_TIME_ms           10
 +#define AD5933_INIT_EXCITATION_TIME_ms        100
 +
 +struct ad5933_state {
 +      struct i2c_client               *client;
 +      struct regulator                *reg;
 +      struct ad5933_platform_data     *pdata;
 +      struct delayed_work             work;
 +      unsigned long                   mclk_hz;
 +      unsigned char                   ctrl_hb;
 +      unsigned char                   ctrl_lb;
 +      unsigned                        range_avail[4];
 +      unsigned short                  vref_mv;
 +      unsigned short                  settling_cycles;
 +      unsigned short                  freq_points;
 +      unsigned                        freq_start;
 +      unsigned                        freq_inc;
 +      unsigned                        state;
 +      unsigned                        poll_time_jiffies;
 +};
 +
 +static struct ad5933_platform_data ad5933_default_pdata  = {
 +      .vref_mv = 3300,
 +};
 +
 +static struct iio_chan_spec ad5933_channels[] = {
 +      IIO_CHAN(IIO_TEMP, 0, 1, 1, NULL, 0, 0, 0,
 +               0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
 +      /* Ring Channels */
 +      IIO_CHAN(IIO_IN, 0, 1, 0, "real_raw", 0, 0,
 +               (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
 +               AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
 +      IIO_CHAN(IIO_IN, 0, 1, 0, "imag_raw", 0, 0,
 +               (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
 +               AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
 +};
 +
 +/* Write 'len' consecutive registers starting at 'reg', one SMBus byte
 + * write per register.  Returns 0 or the first I2C error code. */
 +static int ad5933_i2c_write(struct i2c_client *client,
 +                            u8 reg, u8 len, u8 *data)
 +{
 +      int ret;
 +
 +      while (len--) {
 +              ret = i2c_smbus_write_byte_data(client, reg++, *data++);
 +              if (ret < 0) {
 +                      dev_err(&client->dev, "I2C write error\n");
 +                      return ret;
 +              }
 +      }
 +      return 0;
 +}
 +
 +/* Read 'len' consecutive registers starting at 'reg' into 'data', one
 + * SMBus byte read per register.  Returns 0 or the first I2C error. */
 +static int ad5933_i2c_read(struct i2c_client *client,
 +                            u8 reg, u8 len, u8 *data)
 +{
 +      int ret;
 +
 +      while (len--) {
 +              ret = i2c_smbus_read_byte_data(client, reg++);
 +              if (ret < 0) {
 +                      dev_err(&client->dev, "I2C read error\n");
 +                      return ret;
 +              }
 +              *data++ = ret;
 +      }
 +      return 0;
 +}
 +
 +/* Issue a control command: OR 'cmd' into the cached range/gain bits
 + * and write the high control byte. */
 +static int ad5933_cmd(struct ad5933_state *st, unsigned char cmd)
 +{
 +      unsigned char dat = st->ctrl_hb | cmd;
 +
 +      return ad5933_i2c_write(st->client,
 +                      AD5933_REG_CONTROL_HB, 1, &dat);
 +}
 +
 +/* Set the RESET bit in the low control byte, preserving the cached
 + * clock-source selection. */
 +static int ad5933_reset(struct ad5933_state *st)
 +{
 +      unsigned char dat = st->ctrl_lb | AD5933_CTRL_RESET;
 +      return ad5933_i2c_write(st->client,
 +                      AD5933_REG_CONTROL_LB, 1, &dat);
 +}
 +
 +/* Poll the status register (1 ms apart, up to AD5933_MAX_RETRIES)
 + * until any bit in 'event' is set.  Returns the status byte read
 + * (positive), a negative I2C error, or -EAGAIN on timeout. */
 +static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
 +{
 +      unsigned char val, timeout = AD5933_MAX_RETRIES;
 +      int ret;
 +
 +      while (timeout--) {
 +              ret =  ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &val);
 +              if (ret < 0)
 +                      return ret;
 +              if (val & event)
 +                      return val;
 +              cpu_relax();
 +              mdelay(1);
 +      }
 +
 +      return -EAGAIN;
 +}
 +
 +/*
 + * Program a frequency register (start or increment).
 + * Register code = freq * 2^27 / (mclk / 4); the 24-bit big-endian
 + * result is sent as the low three bytes of the converted u32
 + * (&dat.d8[1]).  The requested Hz value is cached in st for readback.
 + */
 +static int ad5933_set_freq(struct ad5933_state *st,
 +                         unsigned reg, unsigned long freq)
 +{
 +      unsigned long long freqreg;
 +      union {
 +              u32 d32;
 +              u8 d8[4];
 +      } dat;
 +
 +      freqreg = (u64) freq * (u64) (1 << 27);
 +      do_div(freqreg, st->mclk_hz / 4);
 +
 +      switch (reg) {
 +      case AD5933_REG_FREQ_START:
 +              st->freq_start = freq;
 +              break;
 +      case AD5933_REG_FREQ_INC:
 +              st->freq_inc = freq;
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      dat.d32 = cpu_to_be32(freqreg);
 +      return ad5933_i2c_write(st->client, reg, 3, &dat.d8[1]);
 +}
 +
 +/* One-time device init: reset, then defaults of 10 kHz start /
 + * 200 Hz increment sweep, 10 settling cycles, 100 frequency points. */
 +static int ad5933_setup(struct ad5933_state *st)
 +{
 +      unsigned short dat;
 +      int ret;
 +
 +      ret = ad5933_reset(st);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = ad5933_set_freq(st, AD5933_REG_FREQ_START, 10000);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = ad5933_set_freq(st, AD5933_REG_FREQ_INC, 200);
 +      if (ret < 0)
 +              return ret;
 +
 +      st->settling_cycles = 10;
 +      dat = cpu_to_be16(st->settling_cycles);
 +
 +      ret = ad5933_i2c_write(st->client,
 +                      AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
 +      if (ret < 0)
 +              return ret;
 +
 +      st->freq_points = 100;
 +      dat = cpu_to_be16(st->freq_points);
 +
 +      return ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2, (u8 *)&dat);
 +}
 +
 +/* Scale the output-range table (mVpp values normalized to a 3.3 V
 + * supply) to the actual vref; indices match AD5933_CTRL_RANGE() codes. */
 +static void ad5933_calc_out_ranges(struct ad5933_state *st)
 +{
 +      int i;
 +      unsigned normalized_3v3[4] = {1980, 198, 383, 970};
 +
 +      for (i = 0; i < 4; i++)
 +              st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
 +
 +}
 +
 +/*
 + * handles: AD5933_REG_FREQ_START and AD5933_REG_FREQ_INC
 + * Readback inverts the coding used by ad5933_set_freq():
 + * freq = code * (mclk / 4) / 2^27.
 + */
 +
 +static ssize_t ad5933_show_frequency(struct device *dev,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad5933_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      int ret;
 +      unsigned long long freqreg;
 +      union {
 +              u32 d32;
 +              u8 d8[4];
 +      } dat;
 +
 +      mutex_lock(&dev_info->mlock);
 +      ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
 +      mutex_unlock(&dev_info->mlock);
 +      if (ret < 0)
 +              return ret;
 +
 +      /* keep only the 24-bit register value */
 +      freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
 +
 +      freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
 +      do_div(freqreg, 1 << 27);
 +
 +      return sprintf(buf, "%d\n", (int) freqreg);
 +}
 +
 +static ssize_t ad5933_store_frequency(struct device *dev,
 +                                       struct device_attribute *attr,
 +                                       const char *buf,
 +                                       size_t len)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad5933_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      long val;
 +      int ret;
 +
 +      ret = strict_strtoul(buf, 10, &val);
 +      if (ret)
 +              return ret;
 +
 +      if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
 +              return -EINVAL;
 +
 +      mutex_lock(&dev_info->mlock);
 +      ret = ad5933_set_freq(st, this_attr->address, val);
 +      mutex_unlock(&dev_info->mlock);
 +
 +      return ret ? ret : len;
 +}
 +
 +static IIO_DEVICE_ATTR(out0_freq_start, S_IRUGO | S_IWUSR,
 +                      ad5933_show_frequency,
 +                      ad5933_store_frequency,
 +                      AD5933_REG_FREQ_START);
 +
 +static IIO_DEVICE_ATTR(out0_freq_increment, S_IRUGO | S_IWUSR,
 +                      ad5933_show_frequency,
 +                      ad5933_store_frequency,
 +                      AD5933_REG_FREQ_INC);
 +
 +/* Reader for the remaining control attributes, selected by
 + * this_attr->address.  out0_scale_available prints range_avail in the
 + * order 0,3,2,1, i.e. descending output amplitude. */
 +static ssize_t ad5933_show(struct device *dev,
 +                                      struct device_attribute *attr,
 +                                      char *buf)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad5933_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      int ret = 0, len = 0;
 +
 +      mutex_lock(&dev_info->mlock);
 +      switch (this_attr->address) {
 +      case AD5933_OUT_RANGE:
 +              /* range code lives in ctrl_hb bits [2:1] */
 +              len = sprintf(buf, "%d\n",
 +                            st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
 +              break;
 +      case AD5933_OUT_RANGE_AVAIL:
 +              len = sprintf(buf, "%d %d %d %d\n", st->range_avail[0],
 +                            st->range_avail[3], st->range_avail[2],
 +                            st->range_avail[1]);
 +              break;
 +      case AD5933_OUT_SETTLING_CYCLES:
 +              len = sprintf(buf, "%d\n", st->settling_cycles);
 +              break;
 +      case AD5933_IN_PGA_GAIN:
 +              len = sprintf(buf, "%s\n",
 +                            (st->ctrl_hb & AD5933_CTRL_PGA_GAIN_1) ?
 +                            "1" : "0.2");
 +              break;
 +      case AD5933_IN_PGA_GAIN_AVAIL:
 +              len = sprintf(buf, "1 0.2\n");
 +              break;
 +      case AD5933_FREQ_POINTS:
 +              len = sprintf(buf, "%d\n", st->freq_points);
 +              break;
 +      default:
 +              ret = -EINVAL;
 +      }
 +
 +      mutex_unlock(&dev_info->mlock);
 +      return ret ? ret : len;
 +}
 +
 +static ssize_t ad5933_store(struct device *dev,
 +                                       struct device_attribute *attr,
 +                                       const char *buf,
 +                                       size_t len)
 +{
 +      struct iio_dev *dev_info = dev_get_drvdata(dev);
 +      struct ad5933_state *st = iio_priv(dev_info);
 +      struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 +      long val;
 +      int i, ret = 0;
 +      unsigned short dat;
 +
 +      if (this_attr->address != AD5933_IN_PGA_GAIN) {
 +              ret = strict_strtol(buf, 10, &val);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      mutex_lock(&dev_info->mlock);
 +      switch (this_attr->address) {
 +      case AD5933_OUT_RANGE:
 +              for (i = 0; i < 4; i++)
 +                      if (val == st->range_avail[i]) {
 +                              st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
 +                              st->ctrl_hb |= AD5933_CTRL_RANGE(i);
 +                              ret = ad5933_cmd(st, 0);
 +                              break;
 +                      }
 +              ret = -EINVAL;
 +              break;
 +      case AD5933_IN_PGA_GAIN:
 +              if (sysfs_streq(buf, "1")) {
 +                      st->ctrl_hb |= AD5933_CTRL_PGA_GAIN_1;
 +              } else if (sysfs_streq(buf, "0.2")) {
 +                      st->ctrl_hb &= ~AD5933_CTRL_PGA_GAIN_1;
 +              } else {
 +                      ret = -EINVAL;
 +                      break;
 +              }
 +              ret = ad5933_cmd(st, 0);
 +              break;
 +      case AD5933_OUT_SETTLING_CYCLES:
 +              val = clamp(val, 0L, 0x7FFL);
 +              st->settling_cycles = val;
 +
 +              /* 2x, 4x handling, see datasheet */
 +              if (val > 511)
 +                      val = (val >> 1) | (1 << 9);
 +              else if (val > 1022)
 +                      val = (val >> 2) | (3 << 9);
 +
 +              dat = cpu_to_be16(val);
 +              ret = ad5933_i2c_write(st->client,
 +                              AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
 +              break;
 +      case AD5933_FREQ_POINTS:
 +              val = clamp(val, 0L, 511L);
 +              st->freq_points = val;
 +
 +              dat = cpu_to_be16(val);
 +              ret = ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2,
 +                                     (u8 *)&dat);
 +              break;
 +      default:
 +              ret = -EINVAL;
 +      }
 +
 +      mutex_unlock(&dev_info->mlock);
 +      return ret ? ret : len;
 +}
 +
 +static IIO_DEVICE_ATTR(out0_scale, S_IRUGO | S_IWUSR,
 +                      ad5933_show,
 +                      ad5933_store,
 +                      AD5933_OUT_RANGE);
 +
 +static IIO_DEVICE_ATTR(out0_scale_available, S_IRUGO,
 +                      ad5933_show,
 +                      NULL,
 +                      AD5933_OUT_RANGE_AVAIL);
 +
 +static IIO_DEVICE_ATTR(in0_scale, S_IRUGO | S_IWUSR,
 +                      ad5933_show,
 +                      ad5933_store,
 +                      AD5933_IN_PGA_GAIN);
 +
 +static IIO_DEVICE_ATTR(in0_scale_available, S_IRUGO,
 +                      ad5933_show,
 +                      NULL,
 +                      AD5933_IN_PGA_GAIN_AVAIL);
 +
 +static IIO_DEVICE_ATTR(out0_freq_points, S_IRUGO | S_IWUSR,
 +                      ad5933_show,
 +                      ad5933_store,
 +                      AD5933_FREQ_POINTS);
 +
 +static IIO_DEVICE_ATTR(out0_settling_cycles, S_IRUGO | S_IWUSR,
 +                      ad5933_show,
 +                      ad5933_store,
 +                      AD5933_OUT_SETTLING_CYCLES);
 +
 +/* note:
 + * ideally we would handle the scale attributes via the iio_info
 + * (read|write)_raw methods, however this part is a untypical since we
 + * don't create dedicated sysfs channel attributes for out0 and in0.
 + */
 +static struct attribute *ad5933_attributes[] = {
 +      &iio_dev_attr_out0_scale.dev_attr.attr,
 +      &iio_dev_attr_out0_scale_available.dev_attr.attr,
 +      &iio_dev_attr_out0_freq_start.dev_attr.attr,
 +      &iio_dev_attr_out0_freq_increment.dev_attr.attr,
 +      &iio_dev_attr_out0_freq_points.dev_attr.attr,
 +      &iio_dev_attr_out0_settling_cycles.dev_attr.attr,
 +      &iio_dev_attr_in0_scale.dev_attr.attr,
 +      &iio_dev_attr_in0_scale_available.dev_attr.attr,
 +      NULL
 +};
 +
 +static const struct attribute_group ad5933_attribute_group = {
 +      .attrs = ad5933_attributes,
 +};
 +
 +/*
 + * Direct-mode raw read; only the temperature channel (m == 0) is
 + * handled.  The result is a 14-bit two's-complement code converted to
 + * milli-degrees Celsius (32 codes per degree).
 + */
 +static int ad5933_read_raw(struct iio_dev *dev_info,
 +                         struct iio_chan_spec const *chan,
 +                         int *val,
 +                         int *val2,
 +                         long m)
 +{
 +      struct ad5933_state *st = iio_priv(dev_info);
 +      unsigned short dat;
 +      int ret = -EINVAL;
 +
 +      mutex_lock(&dev_info->mlock);
 +      switch (m) {
 +      case 0:
 +              /* refuse direct reads while a ring capture is running */
 +              if (iio_ring_enabled(dev_info)) {
 +                      ret = -EBUSY;
 +                      goto out;
 +              }
 +              ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
 +              if (ret < 0)
 +                      goto out;
 +              ret = ad5933_wait_busy(st, AD5933_STAT_TEMP_VALID);
 +              if (ret < 0)
 +                      goto out;
 +
 +              ret = ad5933_i2c_read(st->client,
 +                              AD5933_REG_TEMP_DATA, 2,
 +                              (u8 *)&dat);
 +              if (ret < 0)
 +                      goto out;
 +              mutex_unlock(&dev_info->mlock);
 +              ret = be16_to_cpu(dat);
 +              /* Temp in Milli degrees Celsius */
 +              if (ret < 8192)
 +                      *val = ret * 1000 / 32;
 +              else
 +                      *val = (ret - 16384) * 1000 / 32;
 +
 +              return IIO_VAL_INT;
 +      }
 +
 +out:
 +      mutex_unlock(&dev_info->mlock);
 +      return ret;
 +}
 +
 +static const struct iio_info ad5933_info = {
 +      .read_raw = &ad5933_read_raw,
 +      .attrs = &ad5933_attribute_group,
 +      .driver_module = THIS_MODULE,
 +};
 +
 +/* Ring preenable: size the ring datum from the enabled scan channels,
 + * reset the part and move it standby -> init-start-freq, recording the
 + * sweep state machine position in st->state. */
 +static int ad5933_ring_preenable(struct iio_dev *indio_dev)
 +{
 +      struct ad5933_state *st = iio_priv(indio_dev);
 +      struct iio_ring_buffer *ring = indio_dev->ring;
 +      size_t d_size;
 +      int ret;
 +
 +      if (!ring->scan_count)
 +              return -EINVAL;
 +
 +      d_size = ring->scan_count *
 +               ad5933_channels[1].scan_type.storagebits / 8;
 +
 +      if (indio_dev->ring->access->set_bytes_per_datum)
 +              indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
 +                                                           d_size);
 +
 +      ret = ad5933_reset(st);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = ad5933_cmd(st, AD5933_CTRL_STANDBY);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = ad5933_cmd(st, AD5933_CTRL_INIT_START_FREQ);
 +      if (ret < 0)
 +              return ret;
 +
 +      st->state = AD5933_CTRL_INIT_START_FREQ;
 +
 +      return 0;
 +}
 +
 +static int ad5933_ring_postenable(struct iio_dev *indio_dev)
 +{
 +      struct ad5933_state *st = iio_priv(indio_dev);
 +
 +      /* AD5933_CTRL_INIT_START_FREQ:
 +       * High Q complex circuits require a long time to reach steady state.
 +       * To facilitate the measurement of such impedances, this mode allows
 +       * the user full control of the settling time requirement before
 +       * entering start frequency sweep mode where the impedance measurement
 +       * takes place. In this mode the impedance is excited with the
 +       * programmed start frequency (ad5933_ring_preenable),
 +       * but no measurement takes place.
 +       */
 +
 +      schedule_delayed_work(&st->work,
 +                            msecs_to_jiffies(AD5933_INIT_EXCITATION_TIME_ms));
 +      return 0;
 +}
 +
 +/* Stop the polling work and power the part down. */
 +static int ad5933_ring_postdisable(struct iio_dev *indio_dev)
 +{
 +      struct ad5933_state *st = iio_priv(indio_dev);
 +
 +      cancel_delayed_work_sync(&st->work);
 +      return ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
 +}
 +
 +static const struct iio_ring_setup_ops ad5933_ring_setup_ops = {
 +      .preenable = &ad5933_ring_preenable,
 +      .postenable = &ad5933_ring_postenable,
 +      .postdisable = &ad5933_ring_postdisable,
 +};
 +
 +/* Attach a software ring buffer plus our setup ops and flag the device
 + * as hardware-buffer capable. */
 +static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
 +{
 +      indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 +      if (!indio_dev->ring)
 +              return -ENOMEM;
 +
 +      /* Effectively select the ring buffer implementation */
 +      indio_dev->ring->access = &ring_sw_access_funcs;
 +
 +      /* Ring buffer functions - here trigger setup related */
 +      indio_dev->ring->setup_ops = &ad5933_ring_setup_ops;
 +
 +      indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER;
 +
 +      return 0;
 +}
 +
 +/*
 + * Delayed-work state machine driving the sweep: after the initial
 + * excitation settling, start the sweep; then poll for valid data, push
 + * real/imag samples into the ring and step the frequency until the
 + * sweep-done status bit is set, at which point the part is powered down.
 + */
 +static void ad5933_work(struct work_struct *work)
 +{
 +      struct ad5933_state *st = container_of(work,
 +              struct ad5933_state, work.work);
 +      struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
 +      struct iio_ring_buffer *ring = indio_dev->ring;
 +      signed short buf[2];
 +      unsigned char status;
 +
 +      mutex_lock(&indio_dev->mlock);
 +      if (st->state == AD5933_CTRL_INIT_START_FREQ) {
 +              /* start sweep */
 +              ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
 +              st->state = AD5933_CTRL_START_SWEEP;
 +              schedule_delayed_work(&st->work, st->poll_time_jiffies);
 +              mutex_unlock(&indio_dev->mlock);
 +              return;
 +      }
 +
 +      ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
 +
 +      if (status & AD5933_STAT_DATA_VALID) {
 +              /* scan_mask bit 0 -> start at REAL_DATA, else IMAG_DATA;
 +               * the two data registers are consecutive */
 +              ad5933_i2c_read(st->client,
 +                              (ring->scan_mask & (1 << 0)) ?
 +                              AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
 +                              ring->scan_count * 2, (u8 *)buf);
 +
 +              /* device data is big-endian */
 +              if (ring->scan_count == 2) {
 +                      buf[0] = be16_to_cpu(buf[0]);
 +                      buf[1] = be16_to_cpu(buf[1]);
 +              } else {
 +                      buf[0] = be16_to_cpu(buf[0]);
 +              }
 +              /* save datum to the ring */
 +              ring->access->store_to(ring, (u8 *)buf, iio_get_time_ns());
 +      } else {
 +              /* no data available - try again later */
 +              schedule_delayed_work(&st->work, st->poll_time_jiffies);
 +              mutex_unlock(&indio_dev->mlock);
 +              return;
 +      }
 +
 +      if (status & AD5933_STAT_SWEEP_DONE) {
 +              /* last sample received - power down do nothing until
 +               * the ring enable is toggled */
 +              ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
 +      } else {
 +              /* we just received a valid datum, move on to the next */
 +              ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
 +              schedule_delayed_work(&st->work, st->poll_time_jiffies);
 +      }
 +
 +      mutex_unlock(&indio_dev->mlock);
 +}
 +
 +static int __devinit ad5933_probe(struct i2c_client *client,
 +                                 const struct i2c_device_id *id)
 +{
 +      int ret, regdone = 0, voltage_uv = 0;
 +      struct ad5933_platform_data *pdata = client->dev.platform_data;
 +      struct ad5933_state *st;
 +      struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
 +      if (indio_dev == NULL)
 +              return -ENOMEM;
 +
 +      st = iio_priv(indio_dev);
 +      i2c_set_clientdata(client, indio_dev);
 +      st->client = client;
 +
 +      if (!pdata)
 +              st->pdata = &ad5933_default_pdata;
 +      else
 +              st->pdata = pdata;
 +
 +      st->reg = regulator_get(&client->dev, "vcc");
 +      if (!IS_ERR(st->reg)) {
 +              ret = regulator_enable(st->reg);
 +              if (ret)
 +                      goto error_put_reg;
 +              voltage_uv = regulator_get_voltage(st->reg);
 +      }
 +
 +      if (voltage_uv)
 +              st->vref_mv = voltage_uv / 1000;
 +      else
 +              st->vref_mv = st->pdata->vref_mv;
 +
 +      if (st->pdata->ext_clk_Hz) {
 +              st->mclk_hz = st->pdata->ext_clk_Hz;
 +              st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
 +      } else {
 +              st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
 +              st->ctrl_lb = AD5933_CTRL_INT_SYSCLK;
 +      }
 +
 +      ad5933_calc_out_ranges(st);
 +      INIT_DELAYED_WORK(&st->work, ad5933_work);
 +      st->poll_time_jiffies = msecs_to_jiffies(AD5933_POLL_TIME_ms);
 +
 +      indio_dev->dev.parent = &client->dev;
 +      indio_dev->info = &ad5933_info;
 +      indio_dev->name = id->name;
 +      indio_dev->modes = INDIO_DIRECT_MODE;
 +      indio_dev->channels = ad5933_channels;
 +      indio_dev->num_channels = 1; /* only register temp0_input */
 +
 +      ret = ad5933_register_ring_funcs_and_init(indio_dev);
 +      if (ret)
 +              goto error_disable_reg;
 +
 +      ret = iio_device_register(indio_dev);
 +      if (ret)
 +              goto error_unreg_ring;
 +      regdone = 1;
 +
 +      /* skip temp0_input, register in0_(real|imag)_raw */
 +      ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
 +                                        &ad5933_channels[1],
 +                                        2);
 +      if (ret)
 +              goto error_unreg_ring;
 +
 +      /* enable both REAL and IMAG channels by default */
 +      iio_scan_mask_set(indio_dev->ring, 0);
 +      iio_scan_mask_set(indio_dev->ring, 1);
 +
 +      ret = ad5933_setup(st);
 +      if (ret)
 +              goto error_uninitialize_ring;
 +
 +      return 0;
 +
 +error_uninitialize_ring:
 +      iio_ring_buffer_unregister(indio_dev->ring);
 +error_unreg_ring:
 +      iio_sw_rb_free(indio_dev->ring);
 +error_disable_reg:
 +      if (!IS_ERR(st->reg))
 +              regulator_disable(st->reg);
 +error_put_reg:
 +      if (!IS_ERR(st->reg))
 +              regulator_put(st->reg);
 +
 +      if (regdone)
 +              iio_device_unregister(indio_dev);
 +      else
 +              iio_free_device(indio_dev);
 +
 +      return ret;
 +}
 +
 +static __devexit int ad5933_remove(struct i2c_client *client)
 +{
 +      struct iio_dev *indio_dev = i2c_get_clientdata(client);
 +      struct ad5933_state *st = iio_priv(indio_dev);
 +
 +      iio_ring_buffer_unregister(indio_dev->ring);
 +      iio_sw_rb_free(indio_dev->ring);
 +      if (!IS_ERR(st->reg)) {
 +              regulator_disable(st->reg);
 +              regulator_put(st->reg);
 +      }
 +      iio_device_unregister(indio_dev);
 +
 +      return 0;
 +}
 +
 +static const struct i2c_device_id ad5933_id[] = {
 +      { "ad5933", 0 },
 +      { "ad5934", 0 },
 +      {}
 +};
 +
 +MODULE_DEVICE_TABLE(i2c, ad5933_id);
 +
 +static struct i2c_driver ad5933_driver = {
 +      .driver = {
 +              .name = "ad5933",
 +      },
 +      .probe = ad5933_probe,
 +      .remove = __devexit_p(ad5933_remove),
 +      .id_table = ad5933_id,
 +};
 +
 +static __init int ad5933_init(void)
 +{
 +      return i2c_add_driver(&ad5933_driver);
 +}
 +module_init(ad5933_init);
 +
 +static __exit void ad5933_exit(void)
 +{
 +      i2c_del_driver(&ad5933_driver);
 +}
 +module_exit(ad5933_exit);
 +
 +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 +MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
 +MODULE_LICENSE("GPL v2");
index 3fc24037db29f35129fdec3f7f02548e224d92fe,9fcad468b18cb97f885563265280547cb60ccd6d..8397d723b43c1d82d493dc14a12de38e4cb8c076
@@@ -3,11 -6,17 +3,12 @@@
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/bitops.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../ring_sw.h"
 -#include "../accel/accel.h"
 -#include "../trigger.h"
 +#include "../trigger_consumer.h"
  #include "adis16400.h"
  
  /**
index 3860d92f1ae742514da52d6846566dd5d6afc7a8,e5d7f962fd423bbfe78b267702be55d22b06d01d..aef39c508d9b5c5b86ec4e1cb5db2eca68d05cfb
@@@ -1,8 -1,15 +1,9 @@@
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "adis16400.h"
  
index dce50b1a4ee99bd6cc4ff7d99398643046d75656,704da4a9265356583fef0b31550fb59ce9ad15d0..859961608c994d9038ca64e90d1aaddb66f20d78
  #include <linux/cdev.h>
  #include <linux/slab.h>
  #include <linux/poll.h>
+ #include <linux/export.h>
  
  #include "iio.h"
 +#include "iio_core.h"
  #include "ring_generic.h"
  
  /**
index 1e2a09643a350e16765f23fd9b499b1bdd6ba6f4,1649f5bd1737f54384b9616b032ae82c247d79a9..fa1b1cd1a0070184d855eafe6ce425b42844918b
@@@ -5,7 -5,13 +5,8 @@@
   *
   * Licensed under the GPL-2.
   */
+ #include <linux/export.h>
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/gpio.h>
 -#include <linux/workqueue.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
  #include <linux/spi/spi.h>
  #include <linux/slab.h>
index 392dfe30244335ca5ddab0764f0ee4fa58ed8dc8,851b7458cade4f62f2e0ad889484c15d3f14112a..b6569c706651dffb8a0776ebf92aa0b9a0ea449c
@@@ -7,10 -7,17 +7,11 @@@
   */
  
  #include <linux/interrupt.h>
 -#include <linux/irq.h>
 -#include <linux/mutex.h>
 -#include <linux/device.h>
  #include <linux/kernel.h>
 -#include <linux/sysfs.h>
 -#include <linux/list.h>
  #include <linux/spi/spi.h>
+ #include <linux/export.h>
  
  #include "../iio.h"
 -#include "../sysfs.h"
  #include "../trigger.h"
  #include "ade7758.h"
  
index 3b32f9e6e4f09a38f62afb07038286790e2d8f60,0000000000000000000000000000000000000000..87c9cdc8bd29866446783f85e09d4c923ed9c7f5
mode 100644,000000..100644
--- /dev/null
@@@ -1,2234 -1,0 +1,2235 @@@
 +/* Driver for Realtek RTS51xx USB card reader
 + *
 + * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License as published by the
 + * Free Software Foundation; either version 2, or (at your option) any
 + * later version.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License along
 + * with this program; if not, see <http://www.gnu.org/licenses/>.
 + *
 + * Author:
 + *   wwang (wei_wang@realsil.com.cn)
 + *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 + * Maintainer:
 + *   Edwin Rong (edwin_rong@realsil.com.cn)
 + *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 + */
 +
 +#include <linux/blkdev.h>
 +#include <linux/kthread.h>
 +#include <linux/sched.h>
 +#include <linux/slab.h>
 +#include <linux/vmalloc.h>
++#include <linux/export.h>
 +
 +#include <scsi/scsi.h>
 +#include <scsi/scsi_eh.h>
 +#include <scsi/scsi_device.h>
 +
 +#include "debug.h"
 +#include "rts51x.h"
 +#include "rts51x_chip.h"
 +#include "rts51x_scsi.h"
 +#include "rts51x_card.h"
 +#include "rts51x_transport.h"
 +#include "rts51x_sys.h"
 +#include "sd_cprm.h"
 +#include "ms_mg.h"
 +#include "trace.h"
 +
 +void scsi_show_command(struct scsi_cmnd *srb)
 +{
 +      char *what = NULL;
 +      int i, unknown_cmd = 0;
 +
 +      switch (srb->cmnd[0]) {
 +      case TEST_UNIT_READY:
 +              what = (char *)"TEST_UNIT_READY";
 +              break;
 +      case REZERO_UNIT:
 +              what = (char *)"REZERO_UNIT";
 +              break;
 +      case REQUEST_SENSE:
 +              what = (char *)"REQUEST_SENSE";
 +              break;
 +      case FORMAT_UNIT:
 +              what = (char *)"FORMAT_UNIT";
 +              break;
 +      case READ_BLOCK_LIMITS:
 +              what = (char *)"READ_BLOCK_LIMITS";
 +              break;
 +      case 0x07:
 +              what = (char *)"REASSIGN_BLOCKS";
 +              break;
 +      case READ_6:
 +              what = (char *)"READ_6";
 +              break;
 +      case WRITE_6:
 +              what = (char *)"WRITE_6";
 +              break;
 +      case SEEK_6:
 +              what = (char *)"SEEK_6";
 +              break;
 +      case READ_REVERSE:
 +              what = (char *)"READ_REVERSE";
 +              break;
 +      case WRITE_FILEMARKS:
 +              what = (char *)"WRITE_FILEMARKS";
 +              break;
 +      case SPACE:
 +              what = (char *)"SPACE";
 +              break;
 +      case INQUIRY:
 +              what = (char *)"INQUIRY";
 +              break;
 +      case RECOVER_BUFFERED_DATA:
 +              what = (char *)"RECOVER_BUFFERED_DATA";
 +              break;
 +      case MODE_SELECT:
 +              what = (char *)"MODE_SELECT";
 +              break;
 +      case RESERVE:
 +              what = (char *)"RESERVE";
 +              break;
 +      case RELEASE:
 +              what = (char *)"RELEASE";
 +              break;
 +      case COPY:
 +              what = (char *)"COPY";
 +              break;
 +      case ERASE:
 +              what = (char *)"ERASE";
 +              break;
 +      case MODE_SENSE:
 +              what = (char *)"MODE_SENSE";
 +              break;
 +      case START_STOP:
 +              what = (char *)"START_STOP";
 +              break;
 +      case RECEIVE_DIAGNOSTIC:
 +              what = (char *)"RECEIVE_DIAGNOSTIC";
 +              break;
 +      case SEND_DIAGNOSTIC:
 +              what = (char *)"SEND_DIAGNOSTIC";
 +              break;
 +      case ALLOW_MEDIUM_REMOVAL:
 +              what = (char *)"ALLOW_MEDIUM_REMOVAL";
 +              break;
 +      case SET_WINDOW:
 +              what = (char *)"SET_WINDOW";
 +              break;
 +      case READ_CAPACITY:
 +              what = (char *)"READ_CAPACITY";
 +              break;
 +      case READ_10:
 +              what = (char *)"READ_10";
 +              break;
 +      case WRITE_10:
 +              what = (char *)"WRITE_10";
 +              break;
 +      case SEEK_10:
 +              what = (char *)"SEEK_10";
 +              break;
 +      case WRITE_VERIFY:
 +              what = (char *)"WRITE_VERIFY";
 +              break;
 +      case VERIFY:
 +              what = (char *)"VERIFY";
 +              break;
 +      case SEARCH_HIGH:
 +              what = (char *)"SEARCH_HIGH";
 +              break;
 +      case SEARCH_EQUAL:
 +              what = (char *)"SEARCH_EQUAL";
 +              break;
 +      case SEARCH_LOW:
 +              what = (char *)"SEARCH_LOW";
 +              break;
 +      case SET_LIMITS:
 +              what = (char *)"SET_LIMITS";
 +              break;
 +      case READ_POSITION:
 +              what = (char *)"READ_POSITION";
 +              break;
 +      case SYNCHRONIZE_CACHE:
 +              what = (char *)"SYNCHRONIZE_CACHE";
 +              break;
 +      case LOCK_UNLOCK_CACHE:
 +              what = (char *)"LOCK_UNLOCK_CACHE";
 +              break;
 +      case READ_DEFECT_DATA:
 +              what = (char *)"READ_DEFECT_DATA";
 +              break;
 +      case MEDIUM_SCAN:
 +              what = (char *)"MEDIUM_SCAN";
 +              break;
 +      case COMPARE:
 +              what = (char *)"COMPARE";
 +              break;
 +      case COPY_VERIFY:
 +              what = (char *)"COPY_VERIFY";
 +              break;
 +      case WRITE_BUFFER:
 +              what = (char *)"WRITE_BUFFER";
 +              break;
 +      case READ_BUFFER:
 +              what = (char *)"READ_BUFFER";
 +              break;
 +      case UPDATE_BLOCK:
 +              what = (char *)"UPDATE_BLOCK";
 +              break;
 +      case READ_LONG:
 +              what = (char *)"READ_LONG";
 +              break;
 +      case WRITE_LONG:
 +              what = (char *)"WRITE_LONG";
 +              break;
 +      case CHANGE_DEFINITION:
 +              what = (char *)"CHANGE_DEFINITION";
 +              break;
 +      case WRITE_SAME:
 +              what = (char *)"WRITE_SAME";
 +              break;
 +      case GPCMD_READ_SUBCHANNEL:
 +              what = (char *)"READ SUBCHANNEL";
 +              break;
 +      case READ_TOC:
 +              what = (char *)"READ_TOC";
 +              break;
 +      case GPCMD_READ_HEADER:
 +              what = (char *)"READ HEADER";
 +              break;
 +      case GPCMD_PLAY_AUDIO_10:
 +              what = (char *)"PLAY AUDIO (10)";
 +              break;
 +      case GPCMD_PLAY_AUDIO_MSF:
 +              what = (char *)"PLAY AUDIO MSF";
 +              break;
 +      case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
 +              what = (char *)"GET EVENT/STATUS NOTIFICATION";
 +              break;
 +      case GPCMD_PAUSE_RESUME:
 +              what = (char *)"PAUSE/RESUME";
 +              break;
 +      case LOG_SELECT:
 +              what = (char *)"LOG_SELECT";
 +              break;
 +      case LOG_SENSE:
 +              what = (char *)"LOG_SENSE";
 +              break;
 +      case GPCMD_STOP_PLAY_SCAN:
 +              what = (char *)"STOP PLAY/SCAN";
 +              break;
 +      case GPCMD_READ_DISC_INFO:
 +              what = (char *)"READ DISC INFORMATION";
 +              break;
 +      case GPCMD_READ_TRACK_RZONE_INFO:
 +              what = (char *)"READ TRACK INFORMATION";
 +              break;
 +      case GPCMD_RESERVE_RZONE_TRACK:
 +              what = (char *)"RESERVE TRACK";
 +              break;
 +      case GPCMD_SEND_OPC:
 +              what = (char *)"SEND OPC";
 +              break;
 +      case MODE_SELECT_10:
 +              what = (char *)"MODE_SELECT_10";
 +              break;
 +      case GPCMD_REPAIR_RZONE_TRACK:
 +              what = (char *)"REPAIR TRACK";
 +              break;
 +      case 0x59:
 +              what = (char *)"READ MASTER CUE";
 +              break;
 +      case MODE_SENSE_10:
 +              what = (char *)"MODE_SENSE_10";
 +              break;
 +      case GPCMD_CLOSE_TRACK:
 +              what = (char *)"CLOSE TRACK/SESSION";
 +              break;
 +      case 0x5C:
 +              what = (char *)"READ BUFFER CAPACITY";
 +              break;
 +      case 0x5D:
 +              what = (char *)"SEND CUE SHEET";
 +              break;
 +      case GPCMD_BLANK:
 +              what = (char *)"BLANK";
 +              break;
 +      case REPORT_LUNS:
 +              what = (char *)"REPORT LUNS";
 +              break;
 +      case MOVE_MEDIUM:
 +              what = (char *)"MOVE_MEDIUM or PLAY AUDIO (12)";
 +              break;
 +      case READ_12:
 +              what = (char *)"READ_12";
 +              break;
 +      case WRITE_12:
 +              what = (char *)"WRITE_12";
 +              break;
 +      case WRITE_VERIFY_12:
 +              what = (char *)"WRITE_VERIFY_12";
 +              break;
 +      case SEARCH_HIGH_12:
 +              what = (char *)"SEARCH_HIGH_12";
 +              break;
 +      case SEARCH_EQUAL_12:
 +              what = (char *)"SEARCH_EQUAL_12";
 +              break;
 +      case SEARCH_LOW_12:
 +              what = (char *)"SEARCH_LOW_12";
 +              break;
 +      case SEND_VOLUME_TAG:
 +              what = (char *)"SEND_VOLUME_TAG";
 +              break;
 +      case READ_ELEMENT_STATUS:
 +              what = (char *)"READ_ELEMENT_STATUS";
 +              break;
 +      case GPCMD_READ_CD_MSF:
 +              what = (char *)"READ CD MSF";
 +              break;
 +      case GPCMD_SCAN:
 +              what = (char *)"SCAN";
 +              break;
 +      case GPCMD_SET_SPEED:
 +              what = (char *)"SET CD SPEED";
 +              break;
 +      case GPCMD_MECHANISM_STATUS:
 +              what = (char *)"MECHANISM STATUS";
 +              break;
 +      case GPCMD_READ_CD:
 +              what = (char *)"READ CD";
 +              break;
 +      case 0xE1:
 +              what = (char *)"WRITE CONTINUE";
 +              break;
 +      case WRITE_LONG_2:
 +              what = (char *)"WRITE_LONG_2";
 +              break;
 +      case VENDOR_CMND:
 +              what = (char *)"Realtek's vendor command";
 +              break;
 +      default:
 +              what = (char *)"(unknown command)";
 +              unknown_cmd = 1;
 +              break;
 +      }
 +
 +      if (srb->cmnd[0] != TEST_UNIT_READY)
 +              RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
 +      if (unknown_cmd) {
 +              RTS51X_DEBUGP("");
 +              for (i = 0; i < srb->cmd_len && i < 16; i++)
 +                      RTS51X_DEBUGPN(" %02x", srb->cmnd[i]);
 +              RTS51X_DEBUGPN("\n");
 +      }
 +}
 +
 +void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type)
 +{
 +      switch (sense_type) {
 +      case SENSE_TYPE_MEDIA_CHANGE:
 +              set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_NOT_PRESENT:
 +              set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
 +              set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
 +              set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_WRITE_PROTECT:
 +              set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
 +              set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_WRITE_ERR:
 +              set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
 +              set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
 +                             ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
 +              break;
 +
 +      case SENSE_TYPE_FORMAT_IN_PROGRESS:
 +              set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_FORMAT_CMD_FAILED:
 +              set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
 +              break;
 +
 +#ifdef SUPPORT_MAGIC_GATE
 +      case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
 +              set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
 +              set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
 +              set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
 +              break;
 +
 +      case SENSE_TYPE_MG_WRITE_ERR:
 +              set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
 +              break;
 +#endif
 +
 +#ifdef SUPPORT_SD_LOCK
 +      case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
 +              set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
 +              break;
 +#endif
 +
 +      case SENSE_TYPE_NO_SENSE:
 +      default:
 +              set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
 +              break;
 +      }
 +}
 +
 +void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
 +                  u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
 +                  u16 sns_key_info1)
 +{
 +      struct sense_data_t *sense = &(chip->sense_buffer[lun]);
 +
 +      sense->err_code = err_code;
 +      sense->sense_key = sense_key;
 +      sense->info[0] = (u8) (info >> 24);
 +      sense->info[1] = (u8) (info >> 16);
 +      sense->info[2] = (u8) (info >> 8);
 +      sense->info[3] = (u8) info;
 +
 +      sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
 +      sense->asc = asc;
 +      sense->ascq = ascq;
 +      if (sns_key_info0 != 0) {
 +              sense->sns_key_info[0] = SKSV | sns_key_info0;
 +              sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
 +              sense->sns_key_info[2] = sns_key_info1 & 0x0f;
 +      }
 +}
 +
 +static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +
 +      rts51x_init_cards(chip);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              return TRANSPORT_FAILED;
 +      }
 +
 +      if (!check_lun_mc(chip, lun)) {
 +              set_lun_mc(chip, lun);
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
 +              return TRANSPORT_FAILED;
 +      }
 +#ifdef SUPPORT_SD_LOCK
 +      if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
 +              struct sd_info *sd_card = &(chip->sd_card);
 +              if (sd_card->sd_lock_notify) {
 +                      sd_card->sd_lock_notify = 0;
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
 +                      return TRANSPORT_FAILED;
 +              } else if (sd_card->sd_lock_status & SD_LOCKED) {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_READ_FORBIDDEN);
 +                      return TRANSPORT_FAILED;
 +              }
 +      }
 +#endif
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +unsigned char formatter_inquiry_str[20] = {
 +      'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
 +      '-', 'M', 'G',          /* Byte[47:49] */
 +      0x0B,                   /* Byte[50]: MG, MS, MSPro, MSXC */
 +      0x00,                   /* Byte[51]: Category Specific Commands */
 +      0x00,                   /* Byte[52]: Access Control and feature */
 +      0x20, 0x20, 0x20,       /* Byte[53:55] */
 +};
 +
 +static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      char *inquiry_default = (char *)"Generic-xD/SD/M.S.      1.00 ";
 +      char *inquiry_string;
 +      unsigned char sendbytes;
 +      unsigned char *buf;
 +      u8 card = get_lun_card(chip, lun);
 +      int pro_formatter_flag = 0;
 +      unsigned char inquiry_buf[] = {
 +              QULIFIRE | DRCT_ACCESS_DEV,
 +              RMB_DISC | 0x0D,
 +              0x00,
 +              0x01,
 +              0x1f,
 +              0x02,
 +              0,
 +              REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
 +      };
 +
 +      inquiry_string = inquiry_default;
 +
 +      buf = vmalloc(scsi_bufflen(srb));
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) {
 +              if (!card || (card == MS_CARD))
 +                      pro_formatter_flag = 1;
 +      }
 +
 +      if (pro_formatter_flag) {
 +              if (scsi_bufflen(srb) < 56)
 +                      sendbytes = (unsigned char)(scsi_bufflen(srb));
 +              else
 +                      sendbytes = 56;
 +      } else {
 +              if (scsi_bufflen(srb) < 36)
 +                      sendbytes = (unsigned char)(scsi_bufflen(srb));
 +              else
 +                      sendbytes = 36;
 +      }
 +
 +      if (sendbytes > 8) {
 +              memcpy(buf, inquiry_buf, 8);
 +              memcpy(buf + 8, inquiry_string, sendbytes - 8);
 +              if (pro_formatter_flag)
 +                      buf[4] = 0x33;  /* Additional Length */
 +      } else {
 +              memcpy(buf, inquiry_buf, sendbytes);
 +      }
 +
 +      if (pro_formatter_flag) {
 +              if (sendbytes > 36)
 +                      memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +
 +      rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
 +      vfree(buf);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +
 +      scsi_set_resid(srb, scsi_bufflen(srb));
 +
 +      if (srb->cmnd[1] == 1)
 +              return TRANSPORT_GOOD;
 +
 +      switch (srb->cmnd[0x4]) {
 +      case STOP_MEDIUM:
 +              /* Media disabled */
 +              return TRANSPORT_GOOD;
 +
 +      case UNLOAD_MEDIUM:
 +              /* Media shall be unload */
 +              if (check_card_ready(chip, lun))
 +                      eject_card(chip, lun);
 +              return TRANSPORT_GOOD;
 +
 +      case MAKE_MEDIUM_READY:
 +      case LOAD_MEDIUM:
 +              if (check_card_ready(chip, lun)) {
 +                      return TRANSPORT_GOOD;
 +              } else {
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +
 +              break;
 +      }
 +
 +      TRACE_RET(chip, TRANSPORT_ERROR);
 +}
 +
 +static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int prevent;
 +
 +      prevent = srb->cmnd[4] & 0x1;
 +
 +      scsi_set_resid(srb, 0);
 +
 +      if (prevent) {
 +              set_sense_type(chip, SCSI_LUN(srb),
 +                             SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd,
 +                        int lun, u8 *buf, int buf_len)
 +{
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      int sys_info_offset;
 +      int data_size = buf_len;
 +      int support_format = 0;
 +      int i = 0;
 +
 +      if (cmd == MODE_SENSE) {
 +              sys_info_offset = 8;
 +              if (data_size > 0x68)
 +                      data_size = 0x68;
 +              buf[i++] = 0x67;        /* Mode Data Length */
 +      } else {
 +              sys_info_offset = 12;
 +              if (data_size > 0x6C)
 +                      data_size = 0x6C;
 +              buf[i++] = 0x00;        /* Mode Data Length (MSB) */
 +              buf[i++] = 0x6A;        /* Mode Data Length (LSB) */
 +      }
 +
 +      /* Medium Type Code */
 +      if (check_card_ready(chip, lun)) {
 +              if (CHK_MSXC(ms_card)) {
 +                      support_format = 1;
 +                      buf[i++] = 0x40;
 +              } else if (CHK_MSPRO(ms_card)) {
 +                      support_format = 1;
 +                      buf[i++] = 0x20;
 +              } else {
 +                      buf[i++] = 0x10;
 +              }
 +
 +              /* WP */
 +              if (check_card_wp(chip, lun))
 +                      buf[i++] = 0x80;
 +              else
 +                      buf[i++] = 0x00;
 +      } else {
 +              buf[i++] = 0x00;        /* MediaType */
 +              buf[i++] = 0x00;        /* WP */
 +      }
 +
 +      buf[i++] = 0x00;        /* Reserved */
 +
 +      if (cmd == MODE_SENSE_10) {
 +              buf[i++] = 0x00;        /* Reserved */
 +              buf[i++] = 0x00;        /* Block descriptor length(MSB) */
 +              buf[i++] = 0x00;        /* Block descriptor length(LSB) */
 +
 +              /* The Following Data is the content of "Page 0x20" */
 +              if (data_size >= 9)
 +                      buf[i++] = 0x20;        /* Page Code */
 +              if (data_size >= 10)
 +                      buf[i++] = 0x62;        /* Page Length */
 +              if (data_size >= 11)
 +                      buf[i++] = 0x00;        /* No Access Control */
 +              if (data_size >= 12) {
 +                      if (support_format)
 +                              buf[i++] = 0xC0;        /* SF, SGM */
 +                      else
 +                              buf[i++] = 0x00;
 +              }
 +      } else {
 +              /* The Following Data is the content of "Page 0x20" */
 +              if (data_size >= 5)
 +                      buf[i++] = 0x20;        /* Page Code */
 +              if (data_size >= 6)
 +                      buf[i++] = 0x62;        /* Page Length */
 +              if (data_size >= 7)
 +                      buf[i++] = 0x00;        /* No Access Control */
 +              if (data_size >= 8) {
 +                      if (support_format)
 +                              buf[i++] = 0xC0;        /* SF, SGM */
 +                      else
 +                              buf[i++] = 0x00;
 +              }
 +      }
 +
 +      if (data_size > sys_info_offset) {
 +              /* 96 Bytes Attribute Data */
 +              int len = data_size - sys_info_offset;
 +              len = (len < 96) ? len : 96;
 +
 +              memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
 +      }
 +}
 +
 +static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      unsigned int dataSize;
 +      int status;
 +      int pro_formatter_flag;
 +      unsigned char pageCode, *buf;
 +      u8 card = get_lun_card(chip, lun);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              scsi_set_resid(srb, scsi_bufflen(srb));
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      pro_formatter_flag = 0;
 +      dataSize = 8;
 +      /* In Combo mode, device responses ModeSense command as a MS LUN
 +       * when no card is inserted */
 +      if ((get_lun2card(chip, lun) & MS_CARD)) {
 +              if (!card || (card == MS_CARD)) {
 +                      dataSize = 108;
 +                      if (chip->option.mspro_formatter_enable)
 +                              pro_formatter_flag = 1;
 +              }
 +      }
 +
 +      buf = kmalloc(dataSize, GFP_KERNEL);
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      pageCode = srb->cmnd[2] & 0x3f;
 +
 +      if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
 +          (pageCode == 0x00) || (pro_formatter_flag && (pageCode == 0x20))) {
 +              if (srb->cmnd[0] == MODE_SENSE) {
 +                      if ((pageCode == 0x3F) || (pageCode == 0x20)) {
 +                              ms_mode_sense(chip, srb->cmnd[0], lun, buf,
 +                                            dataSize);
 +                      } else {
 +                              dataSize = 4;
 +                              buf[0] = 0x03;
 +                              buf[1] = 0x00;
 +                              if (check_card_wp(chip, lun))
 +                                      buf[2] = 0x80;
 +                              else
 +                              buf[3] = 0x00;
 +                      }
 +              } else {
 +                      if ((pageCode == 0x3F) || (pageCode == 0x20)) {
 +                              ms_mode_sense(chip, srb->cmnd[0], lun, buf,
 +                                            dataSize);
 +                      } else {
 +                              dataSize = 8;
 +                              buf[0] = 0x00;
 +                              buf[1] = 0x06;
 +                              buf[2] = 0x00;
 +                              if (check_card_wp(chip, lun))
 +                                      buf[3] = 0x80;
 +                              else
 +                                      buf[3] = 0x00;
 +                              buf[4] = 0x00;
 +                              buf[5] = 0x00;
 +                              buf[6] = 0x00;
 +                              buf[7] = 0x00;
 +                      }
 +              }
 +              status = TRANSPORT_GOOD;
 +      } else {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              scsi_set_resid(srb, scsi_bufflen(srb));
 +              status = TRANSPORT_FAILED;
 +      }
 +
 +      if (status == TRANSPORT_GOOD) {
 +              unsigned int len = min(scsi_bufflen(srb), dataSize);
 +              rts51x_set_xfer_buf(buf, len, srb);
 +              scsi_set_resid(srb, scsi_bufflen(srb) - len);
 +      }
 +      kfree(buf);
 +
 +      return status;
 +}
 +
 +static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct sense_data_t *sense;
 +      unsigned int lun = SCSI_LUN(srb);
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned char *tmp, *buf;
 +
 +      sense = &(chip->sense_buffer[lun]);
 +
 +      if ((get_lun_card(chip, lun) == MS_CARD)
 +          && PRO_UNDER_FORMATTING(ms_card)) {
 +              mspro_format_sense(chip, lun);
 +      }
 +
 +      buf = vmalloc(scsi_bufflen(srb));
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      tmp = (unsigned char *)sense;
 +      memcpy(buf, tmp, scsi_bufflen(srb));
 +
 +      rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
 +      vfree(buf);
 +
 +      scsi_set_resid(srb, 0);
 +      /* Reset Sense Data */
 +      set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +#ifdef SUPPORT_SD_LOCK
 +      struct sd_info *sd_card = &(chip->sd_card);
 +#endif
 +      unsigned int lun = SCSI_LUN(srb);
 +      int retval;
 +      u32 start_sec;
 +      u16 sec_cnt;
 +
 +      if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (!check_lun_mc(chip, lun)) {
 +              set_lun_mc(chip, lun);
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
 +              return TRANSPORT_FAILED;
 +      }
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +#ifdef SUPPORT_SD_LOCK
 +      if (sd_card->sd_erase_status) {
 +              /* Accessing to any card is forbidden
 +               * until the erase procedure of SD is completed */
 +              RTS51X_DEBUGP("SD card being erased!\n");
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (get_lun_card(chip, lun) == SD_CARD) {
 +              if (sd_card->sd_lock_status & SD_LOCKED) {
 +                      RTS51X_DEBUGP("SD card locked!\n");
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_READ_FORBIDDEN);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +#endif
 +
 +      if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
 +              start_sec =
 +                  ((u32) srb->cmnd[2] << 24) |
 +                  ((u32) srb->cmnd[3] << 16) |
 +                  ((u32) srb->cmnd[4] << 8) |
 +                  ((u32) srb->cmnd[5]);
 +              sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8];
 +      } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
 +              start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) |
 +                  ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]);
 +              sec_cnt = srb->cmnd[4];
 +      } else if ((srb->cmnd[0] == VENDOR_CMND) &&
 +                      (srb->cmnd[1] == SCSI_APP_CMD) &&
 +                      ((srb->cmnd[2] == PP_READ10) ||
 +                       (srb->cmnd[2] == PP_WRITE10))) {
 +              start_sec = ((u32) srb->cmnd[4] << 24) |
 +                      ((u32) srb->cmnd[5] << 16) |
 +                      ((u32) srb->cmnd[6] << 8) |
 +                      ((u32) srb->cmnd[7]);
 +              sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10];
 +      } else {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if ((start_sec > chip->capacity[lun]) ||
 +          ((start_sec + sec_cnt) > chip->capacity[lun])) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (sec_cnt == 0) {
 +              scsi_set_resid(srb, 0);
 +              return TRANSPORT_GOOD;
 +      }
 +
 +      if ((srb->sc_data_direction == DMA_TO_DEVICE)
 +          && check_card_wp(chip, lun)) {
 +              RTS51X_DEBUGP("Write protected card!\n");
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      retval = card_rw(srb, chip, start_sec, sec_cnt);
 +      if (retval != STATUS_SUCCESS) {
 +#if 0
 +              if (chip->need_release & chip->lun2card[lun]) {
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              } else {
 +#endif
 +              if (srb->sc_data_direction == DMA_FROM_DEVICE) {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +              } else {
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 +              }
 +#if 0
 +              }
 +#endif
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +/* READ FORMAT CAPACITIES: report the current capacity descriptor and,
 + * when the MS Pro formatter is enabled for an MS-capable LUN that is
 + * empty or holds an MS card, a second "formattable media" descriptor.
 + * Note that a missing card is only an error when the formatter is
 + * disabled; otherwise the no-medium descriptor path below is used.
 + */
 +static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned char *buf;
 +      unsigned int lun = SCSI_LUN(srb);
 +      unsigned int buf_len;
 +      u8 card = get_lun_card(chip, lun);
 +      int desc_cnt;
 +      int i = 0;
 +
 +      if (!check_card_ready(chip, lun)) {
 +              if (!chip->option.mspro_formatter_enable) {
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +
 +      /* 12 bytes fit one descriptor; 0x14 leaves room for a second. */
 +      buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
 +
 +      buf = kmalloc(buf_len, GFP_KERNEL);
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      buf[i++] = 0;
 +      buf[i++] = 0;
 +      buf[i++] = 0;
 +
 +      /* Capacity List Length */
 +      if ((buf_len > 12) && chip->option.mspro_formatter_enable &&
 +          (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) {
 +              buf[i++] = 0x10;
 +              desc_cnt = 2;
 +      } else {
 +              buf[i++] = 0x08;
 +              desc_cnt = 1;
 +      }
 +
 +      /* Emit desc_cnt capacity descriptors (8 bytes each). */
 +      while (desc_cnt) {
 +              if (check_card_ready(chip, lun)) {
 +                      buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24);
 +                      buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16);
 +                      buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8);
 +                      buf[i++] = (unsigned char)(chip->capacity[lun]);
 +
 +                      if (desc_cnt == 2)
 +                              /* Byte[8]: Descriptor Type: Formatted medium */
 +                              buf[i++] = 2;
 +                      else
 +                              buf[i++] = 0;   /* Byte[16] */
 +              } else {
 +                      buf[i++] = 0xFF;
 +                      buf[i++] = 0xFF;
 +                      buf[i++] = 0xFF;
 +                      buf[i++] = 0xFF;
 +
 +                      if (desc_cnt == 2)
 +                              /* Byte[8]: Descriptor Type: No medium */
 +                              buf[i++] = 3;
 +                      else
 +                              buf[i++] = 0;   /*Byte[16] */
 +              }
 +
 +              /* Block length: 0x000200 = 512 bytes. */
 +              buf[i++] = 0x00;
 +              buf[i++] = 0x02;
 +              buf[i++] = 0x00;
 +
 +              desc_cnt--;
 +      }
 +
 +      buf_len = min(scsi_bufflen(srb), buf_len);
 +      rts51x_set_xfer_buf(buf, buf_len, srb);
 +      kfree(buf);
 +
 +      scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned char *buf;
 +      unsigned int lun = SCSI_LUN(srb);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (!check_lun_mc(chip, lun)) {
 +              set_lun_mc(chip, lun);
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
 +              return TRANSPORT_FAILED;
 +      }
 +
 +      buf = kmalloc(8, GFP_KERNEL);
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24);
 +      buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16);
 +      buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8);
 +      buf[3] = (unsigned char)(chip->capacity[lun] - 1);
 +
 +      buf[4] = 0x00;
 +      buf[5] = 0x00;
 +      buf[6] = 0x02;
 +      buf[7] = 0x00;
 +
 +      rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
 +      kfree(buf);
 +
 +      scsi_set_resid(srb, 0);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      unsigned int buf_len;
 +      u8 status[32] = { 0 };
 +
 +      rts51x_pp_status(chip, lun, status, 32);
 +
 +      buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
 +      rts51x_set_xfer_buf(status, buf_len, srb);
 +      scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      u8 rts51x_status[16];
 +      unsigned int buf_len;
 +      unsigned int lun = SCSI_LUN(srb);
 +
 +      rts51x_read_status(chip, lun, rts51x_status, 16);
 +
 +      buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
 +      rts51x_set_xfer_buf(rts51x_status, buf_len, srb);
 +      scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +/* Vendor READ_MEM: read 'len' consecutive chip registers over USB EP0,
 + * starting at the big-endian 16-bit address in cmnd[2..3], and return
 + * the bytes to the host.
 + */
 +static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      unsigned short addr, len, i;
 +      int retval;
 +      u8 *buf;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
 +      len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
 +
 +      /* Register space below 0xe000 is filtered; succeed silently. */
 +      if (addr < 0xe000) {
 +              RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
 +              return TRANSPORT_GOOD;
 +      }
 +
 +      /* NOTE(review): len == 0 presumably makes vmalloc(0) return NULL,
 +       * turning a zero-length read into TRANSPORT_ERROR — confirm.
 +       */
 +      buf = vmalloc(len);
 +      if (!buf)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      /* One EP0 control transfer per register byte. */
 +      for (i = 0; i < len; i++) {
 +              retval = rts51x_ep0_read_register(chip, addr + i, buf + i);
 +              if (retval != STATUS_SUCCESS) {
 +                      vfree(buf);
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +
 +      /* NOTE(review): len is clamped to the host buffer only after all
 +       * registers were read, so bytes beyond the buffer are read and
 +       * discarded; write_mem() clamps before the transfer — confirm
 +       * whether the extra EP0 reads are intentional.
 +       */
 +      len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
 +      rts51x_set_xfer_buf(buf, len, srb);
 +      scsi_set_resid(srb, scsi_bufflen(srb) - len);
 +
 +      vfree(buf);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      unsigned short addr, len, i;
 +      int retval;
 +      u8 *buf;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
 +      len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
 +
 +      if (addr < 0xe000) {
 +              RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
 +              return TRANSPORT_GOOD;
 +      }
 +
 +      len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
 +      buf = vmalloc(len);
 +      if (!buf)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      rts51x_get_xfer_buf(buf, len, srb);
 +
 +      for (i = 0; i < len; i++) {
 +              retval =
 +                  rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]);
 +              if (retval != STATUS_SUCCESS) {
 +                      vfree(buf);
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +
 +      vfree(buf);
 +      scsi_set_resid(srb, scsi_bufflen(srb) - len);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +/* Vendor GET_SD_CSD: return the SD card's raw CSD register contents. */
 +static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct sd_info *sd_card = &(chip->sd_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      /* Only meaningful for an SD card in this LUN. */
 +      if (get_lun_card(chip, lun) != SD_CARD) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +      /* NOTE(review): copies scsi_bufflen() bytes out of raw_csd; this
 +       * assumes the host never requests more than the raw_csd array
 +       * holds — confirm against struct sd_info.
 +       */
 +      rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +/* Vendor READ_PHY: read 'len' consecutive PHY registers starting at the
 + * address in cmnd[5] and return the bytes to the host.  A zero length
 + * transfers nothing and succeeds.
 + */
 +static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int retval;
 +      u8 addr, len, i;
 +      u8 *buf;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      addr = srb->cmnd[5];
 +      len = srb->cmnd[7];
 +
 +      if (len) {
 +              buf = vmalloc(len);
 +              if (!buf)
 +                      TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +              /* One PHY access per register. */
 +              for (i = 0; i < len; i++) {
 +                      retval =
 +                          rts51x_read_phy_register(chip, addr + i, buf + i);
 +                      if (retval != STATUS_SUCCESS) {
 +                              vfree(buf);
 +                              set_sense_type(chip, SCSI_LUN(srb),
 +                                      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +                      }
 +              }
 +
 +              /* Never hand back more than the host buffer can take. */
 +              len = min(scsi_bufflen(srb), (unsigned int)len);
 +              rts51x_set_xfer_buf(buf, len, srb);
 +              scsi_set_resid(srb, scsi_bufflen(srb) - len);
 +
 +              vfree(buf);
 +      }
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int retval;
 +      u8 addr, len, i;
 +      u8 *buf;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      addr = srb->cmnd[5];
 +      len = srb->cmnd[7];
 +
 +      if (len) {
 +              len = min(scsi_bufflen(srb), (unsigned int)len);
 +
 +              buf = vmalloc(len);
 +              if (buf == NULL)
 +                      TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +              rts51x_get_xfer_buf(buf, len, srb);
 +              scsi_set_resid(srb, scsi_bufflen(srb) - len);
 +
 +              for (i = 0; i < len; i++) {
 +                      retval =
 +                          rts51x_write_phy_register(chip, addr + i, buf[i]);
 +                      if (retval != STATUS_SUCCESS) {
 +                              vfree(buf);
 +                              set_sense_type(chip, SCSI_LUN(srb),
 +                                             SENSE_TYPE_MEDIA_WRITE_ERR);
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +                      }
 +              }
 +
 +              vfree(buf);
 +      }
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      u8 card, bus_width;
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      card = get_lun_card(chip, lun);
 +      if ((card == SD_CARD) || (card == MS_CARD)) {
 +              bus_width = chip->card_bus_width[lun];
 +      } else {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +      rts51x_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +#ifdef _MSG_TRACE
 +/* Vendor TRACE_MSG (debug builds): copy the driver's internal trace
 + * buffer to the host; cmnd[2] is passed through to rts51x_trace_msg()
 + * as its 'clear' argument.  The host buffer must be large enough for
 + * the full dump and must be scatter-gather backed.
 + */
 +static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned char *buf = NULL;
 +      u8 clear;
 +      unsigned int buf_len;
 +
 +      /* Fixed size of the complete trace dump. */
 +      buf_len =
 +          4 +
 +          ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);
 +
 +      if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
 +              set_sense_type(chip, SCSI_LUN(srb),
 +                             SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      clear = srb->cmnd[2];
 +
 +      buf = vmalloc(scsi_bufflen(srb));
 +      if (buf == NULL)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      rts51x_trace_msg(chip, buf, clear);
 +
 +      rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
 +      vfree(buf);
 +
 +      scsi_set_resid(srb, 0);
 +      return TRANSPORT_GOOD;
 +}
 +#endif
 +
 +static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int retval = STATUS_SUCCESS;
 +      unsigned int lun = SCSI_LUN(srb);
 +      u8 cmd_type, mask, value, idx, mode, len;
 +      u16 addr;
 +      u32 timeout;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      switch (srb->cmnd[3]) {
 +      case INIT_BATCHCMD:
 +              rts51x_init_cmd(chip);
 +              break;
 +
 +      case ADD_BATCHCMD:
 +              cmd_type = srb->cmnd[4];
 +              if (cmd_type > 2) {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
 +              mask = srb->cmnd[7];
 +              value = srb->cmnd[8];
 +              rts51x_add_cmd(chip, cmd_type, addr, mask, value);
 +              break;
 +
 +      case SEND_BATCHCMD:
 +              mode = srb->cmnd[4];
 +              len = srb->cmnd[5];
 +              timeout =
 +                  ((u32) srb->cmnd[6] << 24) | ((u32) srb->
 +                                                cmnd[7] << 16) | ((u32) srb->
 +                                                                  cmnd[8] <<
 +                                                                  8) | ((u32)
 +                                                                        srb->
 +                                                                        cmnd
 +                                                                        [9]);
 +              retval = rts51x_send_cmd(chip, mode, 1000);
 +              if (retval != STATUS_SUCCESS) {
 +                      set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              if (mode & STAGE_R) {
 +                      retval = rts51x_get_rsp(chip, len, timeout);
 +                      if (retval != STATUS_SUCCESS) {
 +                              set_sense_type(chip, lun,
 +                                      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +                      }
 +              }
 +              break;
 +
 +      case GET_BATCHRSP:
 +              idx = srb->cmnd[4];
 +              value = chip->rsp_buf[idx];
 +              if (scsi_bufflen(srb) < 1) {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              rts51x_set_xfer_buf(&value, 1, srb);
 +              scsi_set_resid(srb, 0);
 +              break;
 +
 +      default:
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (retval != STATUS_SUCCESS) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      return TRANSPORT_GOOD;
 +}
 +
 +static int suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int result;
 +
 +      switch (srb->cmnd[3]) {
 +      case INIT_BATCHCMD:
 +      case ADD_BATCHCMD:
 +      case SEND_BATCHCMD:
 +      case GET_BATCHRSP:
 +              result = rw_mem_cmd_buf(srb, chip);
 +              break;
 +      default:
 +              result = TRANSPORT_ERROR;
 +      }
 +
 +      return result;
 +}
 +
 +static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int result;
 +
 +      switch (srb->cmnd[2]) {
 +      case PP_READ10:
 +      case PP_WRITE10:
 +              result = read_write(srb, chip);
 +              break;
 +
 +      case SUIT_CMD:
 +              result = suit_cmd(srb, chip);
 +              break;
 +
 +      case READ_PHY:
 +              result = read_phy_register(srb, chip);
 +              break;
 +
 +      case WRITE_PHY:
 +              result = write_phy_register(srb, chip);
 +              break;
 +
 +      case GET_DEV_STATUS:
 +              result = get_dev_status(srb, chip);
 +              break;
 +
 +      default:
 +              set_sense_type(chip, SCSI_LUN(srb),
 +                             SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      return result;
 +}
 +
 +/* VENDOR_CMND dispatcher: route on the opcode in cmnd[1].  Unknown
 + * opcodes fail with INVALID FIELD IN CDB sense data.
 + */
 +static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int result = TRANSPORT_GOOD;
 +
 +      switch (srb->cmnd[1]) {
 +      case READ_STATUS:
 +              result = read_status(srb, chip);
 +              break;
 +
 +      case READ_MEM:
 +              result = read_mem(srb, chip);
 +              break;
 +
 +      case WRITE_MEM:
 +              result = write_mem(srb, chip);
 +              break;
 +
 +      case GET_BUS_WIDTH:
 +              result = get_card_bus_width(srb, chip);
 +              break;
 +
 +      case GET_SD_CSD:
 +              result = get_sd_csd(srb, chip);
 +              break;
 +
 +#ifdef _MSG_TRACE
 +      case TRACE_MSG:
 +              result = trace_msg_cmd(srb, chip);
 +              break;
 +#endif
 +
 +      case SCSI_APP_CMD:
 +              result = app_cmd(srb, chip);
 +              break;
 +
 +      default:
 +              set_sense_type(chip, SCSI_LUN(srb),
 +                             SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      return result;
 +}
 +
 +/* MS_FORMAT: format an MS Pro card.  The CDB must carry the ASCII
 + * signature "MGfmt" (0x4D 0x47 0x66 0x6D 0x74) in bytes 3..7; bit 0 of
 + * cmnd[8] set selects a full format, clear selects a quick format.
 + */
 +static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +      int retval, quick_format;
 +
 +      if (get_lun_card(chip, lun) != MS_CARD) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      /* Reject CDBs without the "MGfmt" signature. */
 +      if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47)
 +          || (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D)
 +          || (srb->cmnd[7] != 0x74)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (srb->cmnd[8] & 0x01)
 +              quick_format = 0;
 +      else
 +              quick_format = 1;
 +
 +      if (!(chip->card_ready & MS_CARD)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (chip->card_wp & MS_CARD) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      /* Only MS Pro media can be formatted this way. */
 +      if (!CHK_MSPRO(ms_card)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
 +      if (retval != STATUS_SUCCESS) {
 +              set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +      return TRANSPORT_GOOD;
 +}
 +
 +#ifdef SUPPORT_PCGL_1P18
 +int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +      u8 dev_info_id, data_len;
 +      u8 *buf;
 +      unsigned int buf_len;
 +      int i;
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +      if ((get_lun_card(chip, lun) != MS_CARD)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
 +          (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
 +          (srb->cmnd[7] != 0x44)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      dev_info_id = srb->cmnd[3];
 +      if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
 +          (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
 +          !CHK_MSPRO(ms_card)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (dev_info_id == 0x15)
 +              buf_len = data_len = 0x3A;
 +      else
 +              buf_len = data_len = 0x6A;
 +
 +      buf = kmalloc(buf_len, GFP_KERNEL);
 +      if (!buf)
 +              TRACE_RET(chip, TRANSPORT_ERROR);
 +
 +      i = 0;
 +      /* GET Memory Stick Media Information Response Header */
 +      buf[i++] = 0x00;        /* Data length MSB */
 +      buf[i++] = data_len;    /* Data length LSB */
 +      /* Device Information Type Code */
 +      if (CHK_MSXC(ms_card))
 +              buf[i++] = 0x03;
 +      else
 +              buf[i++] = 0x02;
 +      /* SGM bit */
 +      buf[i++] = 0x01;
 +      /* Reserved */
 +      buf[i++] = 0x00;
 +      buf[i++] = 0x00;
 +      buf[i++] = 0x00;
 +      /* Number of Device Information */
 +      buf[i++] = 0x01;
 +
 +      /*  Device Information Body
 +       *  Device Information ID Number */
 +      buf[i++] = dev_info_id;
 +      /* Device Information Length */
 +      if (dev_info_id == 0x15)
 +              data_len = 0x31;
 +      else
 +              data_len = 0x61;
 +      buf[i++] = 0x00;        /* Data length MSB */
 +      buf[i++] = data_len;    /* Data length LSB */
 +      /* Valid Bit */
 +      buf[i++] = 0x80;
 +      if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
 +              /* System Information */
 +              memcpy(buf + i, ms_card->raw_sys_info, 96);
 +      } else {
 +              /* Model Name */
 +              memcpy(buf + i, ms_card->raw_model_name, 48);
 +      }
 +
 +      rts51x_set_xfer_buf(buf, buf_len, srb);
 +
 +      if (dev_info_id == 0x15)
 +              scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
 +      else
 +              scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
 +
 +      kfree(buf);
 +      return STATUS_SUCCESS;
 +}
 +#endif
 +
 +static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      int retval = TRANSPORT_ERROR;
 +
 +      if (srb->cmnd[2] == MS_FORMAT)
 +              retval = ms_format_cmnd(srb, chip);
 +#ifdef SUPPORT_PCGL_1P18
 +      else if (srb->cmnd[2] == GET_MS_INFORMATION)
 +              retval = get_ms_information(srb, chip);
 +#endif
 +
 +      return retval;
 +}
 +
 +#ifdef SUPPORT_CPRM
 +/* Dispatcher for the CPRM SD pass-through opcodes.  Outstanding SD
 + * background work is flushed before the sub-command runs, and the card
 + * must be a ready SD card.
 + */
 +static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      unsigned int lun = SCSI_LUN(srb);
 +      int result;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      sd_cleanup_work(chip);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +      if ((get_lun_card(chip, lun) != SD_CARD)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      switch (srb->cmnd[0]) {
 +      case SD_PASS_THRU_MODE:
 +              result = sd_pass_thru_mode(srb, chip);
 +              break;
 +
 +      case SD_EXECUTE_NO_DATA:
 +              result = sd_execute_no_data(srb, chip);
 +              break;
 +
 +      case SD_EXECUTE_READ:
 +              result = sd_execute_read_data(srb, chip);
 +              break;
 +
 +      case SD_EXECUTE_WRITE:
 +              result = sd_execute_write_data(srb, chip);
 +              break;
 +
 +      case SD_GET_RSP:
 +              result = sd_get_cmd_rsp(srb, chip);
 +              break;
 +
 +      case SD_HW_RST:
 +              result = sd_hw_rst(srb, chip);
 +              break;
 +
 +      default:
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      return result;
 +}
 +#endif
 +
 +#ifdef SUPPORT_MAGIC_GATE
 +int mg_report_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +      int retval;
 +      u8 key_format;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      ms_cleanup_work(chip);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +      if ((get_lun_card(chip, lun) != MS_CARD)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (srb->cmnd[7] != KC_MG_R_PRO) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (!CHK_MSPRO(ms_card)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      key_format = srb->cmnd[10] & 0x3F;
 +
 +      switch (key_format) {
 +      case KF_GET_LOC_EKB:
 +              if ((scsi_bufflen(srb) == 0x41C) &&
 +                  (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) {
 +                      retval = mg_get_local_EKB(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      case KF_RSP_CHG:
 +              if ((scsi_bufflen(srb) == 0x24) &&
 +                  (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) {
 +                      retval = mg_get_rsp_chg(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      case KF_GET_ICV:
 +              ms_card->mg_entry_num = srb->cmnd[5];
 +              if ((scsi_bufflen(srb) == 0x404) &&
 +                  (srb->cmnd[8] == 0x04) &&
 +                  (srb->cmnd[9] == 0x04) &&
 +                  (srb->cmnd[2] == 0x00) &&
 +                  (srb->cmnd[3] == 0x00) &&
 +                  (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
 +                      retval = mg_get_ICV(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      default:
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +      return TRANSPORT_GOOD;
 +}
 +
 +int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +      int retval;
 +      u8 key_format;
 +
 +      rts51x_prepare_run(chip);
 +      RTS51X_SET_STAT(chip, STAT_RUN);
 +
 +      ms_cleanup_work(chip);
 +
 +      if (!check_card_ready(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +      if (check_card_wp(chip, lun)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +      if ((get_lun_card(chip, lun) != MS_CARD)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (srb->cmnd[7] != KC_MG_R_PRO) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      if (!CHK_MSPRO(ms_card)) {
 +              set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      key_format = srb->cmnd[10] & 0x3F;
 +
 +      switch (key_format) {
 +      case KF_SET_LEAF_ID:
 +              if ((scsi_bufflen(srb) == 0x0C) &&
 +                  (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
 +                      retval = mg_set_leaf_id(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      case KF_CHG_HOST:
 +              if ((scsi_bufflen(srb) == 0x0C) &&
 +                  (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
 +                      retval = mg_chg(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      case KF_RSP_HOST:
 +              if ((scsi_bufflen(srb) == 0x0C) &&
 +                  (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
 +                      retval = mg_rsp(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      case KF_SET_ICV:
 +              ms_card->mg_entry_num = srb->cmnd[5];
 +              if ((scsi_bufflen(srb) == 0x404) &&
 +                  (srb->cmnd[8] == 0x04) &&
 +                  (srb->cmnd[9] == 0x04) &&
 +                  (srb->cmnd[2] == 0x00) &&
 +                  (srb->cmnd[3] == 0x00) &&
 +                  (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
 +                      retval = mg_set_ICV(srb, chip);
 +                      if (retval != STATUS_SUCCESS)
 +                              TRACE_RET(chip, TRANSPORT_FAILED);
 +              } else {
 +                      set_sense_type(chip, lun,
 +                                     SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +              break;
 +
 +      default:
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              TRACE_RET(chip, TRANSPORT_FAILED);
 +      }
 +
 +      scsi_set_resid(srb, 0);
 +      return TRANSPORT_GOOD;
 +}
 +#endif
 +
 +int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip)
 +{
 +#ifdef SUPPORT_SD_LOCK
 +      struct sd_info *sd_card = &(chip->sd_card);
 +#endif
 +      struct ms_info *ms_card = &(chip->ms_card);
 +      unsigned int lun = SCSI_LUN(srb);
 +      int result = TRANSPORT_GOOD;
 +
 +#ifdef SUPPORT_SD_LOCK
 +      if (sd_card->sd_erase_status) {
 +              /* Block all SCSI commands except REQUEST_SENSE
 +               * and rs_ppstatus */
 +              if (!
 +                  ((srb->cmnd[0] == VENDOR_CMND)
 +                   && (srb->cmnd[1] == SCSI_APP_CMD)
 +                   && (srb->cmnd[2] == GET_DEV_STATUS))
 +                  && (srb->cmnd[0] != REQUEST_SENSE)) {
 +                      /* Logical Unit Not Ready Format in Progress */
 +                      set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
 +                                     0, 0);
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +#endif
 +
 +      if ((get_lun_card(chip, lun) == MS_CARD) &&
 +          (ms_card->format_status == FORMAT_IN_PROGRESS)) {
 +              if ((srb->cmnd[0] != REQUEST_SENSE)
 +                  && (srb->cmnd[0] != INQUIRY)) {
 +                      /* Logical Unit Not Ready Format in Progress */
 +                      set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
 +                                     0, (u16) (ms_card->progress));
 +                      TRACE_RET(chip, TRANSPORT_FAILED);
 +              }
 +      }
 +
 +      switch (srb->cmnd[0]) {
 +      case READ_10:
 +      case WRITE_10:
 +      case READ_6:
 +      case WRITE_6:
 +              result = read_write(srb, chip);
 +              break;
 +
 +      case TEST_UNIT_READY:
 +              result = test_unit_ready(srb, chip);
 +              break;
 +
 +      case INQUIRY:
 +              result = inquiry(srb, chip);
 +              break;
 +
 +      case READ_CAPACITY:
 +              result = read_capacity(srb, chip);
 +              break;
 +
 +      case START_STOP:
 +              result = start_stop_unit(srb, chip);
 +              break;
 +
 +      case ALLOW_MEDIUM_REMOVAL:
 +              result = allow_medium_removal(srb, chip);
 +              break;
 +
 +      case REQUEST_SENSE:
 +              result = request_sense(srb, chip);
 +              break;
 +
 +      case MODE_SENSE:
 +      case MODE_SENSE_10:
 +              result = mode_sense(srb, chip);
 +              break;
 +
 +      case 0x23:
 +              result = read_format_capacity(srb, chip);
 +              break;
 +
 +      case VENDOR_CMND:
 +              result = vendor_cmnd(srb, chip);
 +              break;
 +
 +      case MS_SP_CMND:
 +              result = ms_sp_cmnd(srb, chip);
 +              break;
 +
 +#ifdef SUPPORT_CPRM
 +      case SD_PASS_THRU_MODE:
 +      case SD_EXECUTE_NO_DATA:
 +      case SD_EXECUTE_READ:
 +      case SD_EXECUTE_WRITE:
 +      case SD_GET_RSP:
 +      case SD_HW_RST:
 +              result = sd_extention_cmnd(srb, chip);
 +              break;
 +#endif
 +
 +#ifdef SUPPORT_MAGIC_GATE
 +      case CMD_MSPRO_MG_RKEY:
 +              result = mg_report_key(srb, chip);
 +              break;
 +
 +      case CMD_MSPRO_MG_SKEY:
 +              result = mg_send_key(srb, chip);
 +              break;
 +#endif
 +
 +      case FORMAT_UNIT:
 +      case MODE_SELECT:
 +      case VERIFY:
 +              result = TRANSPORT_GOOD;
 +              break;
 +
 +      default:
 +              set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 +              result = TRANSPORT_FAILED;
 +      }
 +
 +      return result;
 +}
 +
 +/***********************************************************************
 + * Host functions
 + ***********************************************************************/
 +
 +const char *host_info(struct Scsi_Host *host)
 +{
 +      return "SCSI emulation for RTS51xx USB driver-based card reader";
 +}
 +
 +int slave_alloc(struct scsi_device *sdev)
 +{
 +      /*
 +       * Set the INQUIRY transfer length to 36.  We don't use any of
 +       * the extra data and many devices choke if asked for more or
 +       * less than 36 bytes.
 +       */
 +      sdev->inquiry_len = 36;
 +      return 0;
 +}
 +
 +int slave_configure(struct scsi_device *sdev)
 +{
 +      /* Scatter-gather buffers (all but the last) must have a length
 +       * divisible by the bulk maxpacket size.  Otherwise a data packet
 +       * would end up being short, causing a premature end to the data
 +       * transfer.  Since high-speed bulk pipes have a maxpacket size
 +       * of 512, we'll use that as the scsi device queue's DMA alignment
 +       * mask.  Guaranteeing proper alignment of the first buffer will
 +       * have the desired effect because, except at the beginning and
 +       * the end, scatter-gather buffers follow page boundaries. */
 +      blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
 +
 +      /* Set the SCSI level to at least 2.  We'll leave it at 3 if that's
 +       * what is originally reported.  We need this to avoid confusing
 +       * the SCSI layer with devices that report 0 or 1, but need 10-byte
 +       * commands (ala ATAPI devices behind certain bridges, or devices
 +       * which simply have broken INQUIRY data).
 +       *
 +       * NOTE: This means /dev/sg programs (ala cdrecord) will get the
 +       * actual information.  This seems to be the preference for
 +       * programs like that.
 +       *
 +       * NOTE: This also means that /proc/scsi/scsi and sysfs may report
 +       * the actual value or the modified one, depending on where the
 +       * data comes from.
 +       */
 +      if (sdev->scsi_level < SCSI_2)
 +              sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
 +
 +      return 0;
 +}
 +
 +/***********************************************************************
 + * /proc/scsi/ functions
 + ***********************************************************************/
 +
 +/* we use this macro to help us write into the buffer */
 +#undef SPRINTF
 +#define SPRINTF(args...) \
 +      do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
 +
 +int proc_info(struct Scsi_Host *host, char *buffer,
 +            char **start, off_t offset, int length, int inout)
 +{
 +      char *pos = buffer;
 +
 +      /* if someone is sending us data, just throw it away */
 +      if (inout)
 +              return length;
 +
 +      /* print the controller name */
 +      SPRINTF("   Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
 +
 +      /* print product, vendor, and driver version strings */
 +      SPRINTF("       Vendor: Realtek Corp.\n");
 +      SPRINTF("      Product: RTS51xx USB Card Reader\n");
 +      SPRINTF("      Version: %s\n", DRIVER_VERSION);
 +      SPRINTF("        Build: %s\n", __TIME__);
 +
 +      /*
 +       * Calculate start of next buffer, and return value.
 +       */
 +      *start = buffer + offset;
 +
 +      if ((pos - buffer) < offset)
 +              return 0;
 +      else if ((pos - buffer - offset) < length)
 +              return pos - buffer - offset;
 +      else
 +              return length;
 +}
 +
 +/* queue a command */
 +/* This is always called with scsi_lock(host) held */
 +int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
 +{
 +      struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
 +
 +      /* check for state-transition errors */
 +      if (chip->srb != NULL) {
 +              RTS51X_DEBUGP("Error in %s: chip->srb = %p\n",
 +                             __func__, chip->srb);
 +              return SCSI_MLQUEUE_HOST_BUSY;
 +      }
 +
 +      /* fail the command if we are disconnecting */
 +      if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
 +              RTS51X_DEBUGP("Fail command during disconnect\n");
 +              srb->result = DID_NO_CONNECT << 16;
 +              done(srb);
 +              return 0;
 +      }
 +
 +      /* enqueue the command and wake up the control thread */
 +      srb->scsi_done = done;
 +      chip->srb = srb;
 +      complete(&chip->usb->cmnd_ready);
 +
 +      return 0;
 +}
 +
 +#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) */
 +int queuecommand(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
 +{
 +      return queuecommand_lck(srb, done);
 +}
 +#else
 +DEF_SCSI_QCMD(queuecommand)
 +#endif
 +/***********************************************************************
 + * Error handling functions
 + ***********************************************************************/
 +/* Command timeout and abort */
 +int command_abort(struct scsi_cmnd *srb)
 +{
 +      struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
 +
 +      RTS51X_DEBUGP("%s called\n", __func__);
 +
 +      /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
 +       * bits are protected by the host lock. */
 +      scsi_lock(rts51x_to_host(chip));
 +
 +      /* Is this command still active? */
 +      if (chip->srb != srb) {
 +              scsi_unlock(rts51x_to_host(chip));
 +              RTS51X_DEBUGP("-- nothing to abort\n");
 +              return FAILED;
 +      }
 +
 +      /* Set the TIMED_OUT bit.  Also set the ABORTING bit, but only if
 +       * a device reset isn't already in progress (to avoid interfering
 +       * with the reset).  Note that we must retain the host lock while
 +       * calling usb_stor_stop_transport(); otherwise it might interfere
 +       * with an auto-reset that begins as soon as we release the lock. */
 +      set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
 +      if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) {
 +              set_bit(FLIDX_ABORTING, &chip->usb->dflags);
 +              /* rts51x_stop_transport(us); */
 +      }
 +      scsi_unlock(rts51x_to_host(chip));
 +
 +      /* Wait for the aborted command to finish */
 +      wait_for_completion(&chip->usb->notify);
 +      return SUCCESS;
 +}
 +
 +/* This invokes the transport reset mechanism to reset the state of the
 + * device */
 +int device_reset(struct scsi_cmnd *srb)
 +{
 +      int result = 0;
 +
 +      RTS51X_DEBUGP("%s called\n", __func__);
 +
 +      return result < 0 ? FAILED : SUCCESS;
 +}
 +
 +/* Simulate a SCSI bus reset by resetting the device's USB port. */
 +int bus_reset(struct scsi_cmnd *srb)
 +{
 +      int result = 0;
 +
 +      RTS51X_DEBUGP("%s called\n", __func__);
 +
 +      return result < 0 ? FAILED : SUCCESS;
 +}
 +
 +static const char *rts5139_info(struct Scsi_Host *host)
 +{
 +      return "SCSI emulation for RTS5139 USB card reader";
 +}
 +
 +struct scsi_host_template rts51x_host_template = {
 +      /* basic userland interface stuff */
 +      .name = RTS51X_NAME,
 +      .proc_name = RTS51X_NAME,
 +      .proc_info = proc_info,
 +      .info = rts5139_info,
 +
 +      /* command interface -- queued only */
 +      .queuecommand = queuecommand,
 +
 +      /* error and abort handlers */
 +      .eh_abort_handler = command_abort,
 +      .eh_device_reset_handler = device_reset,
 +      .eh_bus_reset_handler = bus_reset,
 +
 +      /* queue commands only, only one command per LUN */
 +      .can_queue = 1,
 +      .cmd_per_lun = 1,
 +
 +      /* unknown initiator id */
 +      .this_id = -1,
 +
 +      .slave_alloc = slave_alloc,
 +      .slave_configure = slave_configure,
 +
 +      /* lots of sg segments can be handled */
 +      .sg_tablesize = SG_ALL,
 +
 +      /* limit the total size of a transfer to 120 KB */
 +      .max_sectors = 240,
 +
 +      /* merge commands... this seems to help performance, but
 +       * periodically someone should test to see which setting is more
 +       * optimal.
 +       */
 +      .use_clustering = 1,
 +
 +      /* emulated HBA */
 +      .emulated = 1,
 +
 +      /* we do our own delay after a device or bus reset */
 +      .skip_settle_delay = 1,
 +
 +      /* sysfs device attributes */
 +      /* .sdev_attrs = sysfs_device_attr_list, */
 +
 +      /* module management */
 +      .module = THIS_MODULE
 +};
 +
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 7b38512d6c419cd43690bca290ab4765220c409b,0000000000000000000000000000000000000000..ced26c8ccd573eb8e6757a30681901b7a0ac88eb
mode 100644,000000..100644
--- /dev/null
@@@ -1,424 -1,0 +1,425 @@@
 +/*
 + * opal driver interface to hvc_console.c
 + *
 + * Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 + *
 + */
 +
 +#undef DEBUG
 +
 +#include <linux/types.h>
 +#include <linux/init.h>
 +#include <linux/delay.h>
 +#include <linux/slab.h>
 +#include <linux/console.h>
 +#include <linux/of.h>
 +#include <linux/of_platform.h>
++#include <linux/export.h>
 +
 +#include <asm/hvconsole.h>
 +#include <asm/prom.h>
 +#include <asm/firmware.h>
 +#include <asm/hvsi.h>
 +#include <asm/udbg.h>
 +#include <asm/opal.h>
 +
 +#include "hvc_console.h"
 +
 +static const char hvc_opal_name[] = "hvc_opal";
 +
 +static struct of_device_id hvc_opal_match[] __devinitdata = {
 +      { .name = "serial", .compatible = "ibm,opal-console-raw" },
 +      { .name = "serial", .compatible = "ibm,opal-console-hvsi" },
 +      { },
 +};
 +
 +typedef enum hv_protocol {
 +      HV_PROTOCOL_RAW,
 +      HV_PROTOCOL_HVSI
 +} hv_protocol_t;
 +
 +struct hvc_opal_priv {
 +      hv_protocol_t           proto;  /* Raw data or HVSI packets */
 +      struct hvsi_priv        hvsi;   /* HVSI specific data */
 +};
 +static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
 +
 +/* For early boot console */
 +static struct hvc_opal_priv hvc_opal_boot_priv;
 +static u32 hvc_opal_boot_termno;
 +
 +static const struct hv_ops hvc_opal_raw_ops = {
 +      .get_chars = opal_get_chars,
 +      .put_chars = opal_put_chars,
 +      .notifier_add = notifier_add_irq,
 +      .notifier_del = notifier_del_irq,
 +      .notifier_hangup = notifier_hangup_irq,
 +};
 +
 +static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
 +
 +      if (WARN_ON(!pv))
 +              return -ENODEV;
 +
 +      return hvsilib_get_chars(&pv->hvsi, buf, count);
 +}
 +
 +static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
 +
 +      if (WARN_ON(!pv))
 +              return -ENODEV;
 +
 +      return hvsilib_put_chars(&pv->hvsi, buf, count);
 +}
 +
 +static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
 +      int rc;
 +
 +      pr_devel("HVSI@%x: do open !\n", hp->vtermno);
 +
 +      rc = notifier_add_irq(hp, data);
 +      if (rc)
 +              return rc;
 +
 +      return hvsilib_open(&pv->hvsi, hp);
 +}
 +
 +static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
 +
 +      pr_devel("HVSI@%x: do close !\n", hp->vtermno);
 +
 +      hvsilib_close(&pv->hvsi, hp);
 +
 +      notifier_del_irq(hp, data);
 +}
 +
 +void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
 +
 +      pr_devel("HVSI@%x: do hangup !\n", hp->vtermno);
 +
 +      hvsilib_close(&pv->hvsi, hp);
 +
 +      notifier_hangup_irq(hp, data);
 +}
 +
 +static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
 +
 +      if (!pv)
 +              return -EINVAL;
 +      return pv->hvsi.mctrl;
 +}
 +
 +static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
 +                              unsigned int clear)
 +{
 +      struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
 +
 +      pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
 +               hp->vtermno, set, clear);
 +
 +      if (set & TIOCM_DTR)
 +              hvsilib_write_mctrl(&pv->hvsi, 1);
 +      else if (clear & TIOCM_DTR)
 +              hvsilib_write_mctrl(&pv->hvsi, 0);
 +
 +      return 0;
 +}
 +
 +static const struct hv_ops hvc_opal_hvsi_ops = {
 +      .get_chars = hvc_opal_hvsi_get_chars,
 +      .put_chars = hvc_opal_hvsi_put_chars,
 +      .notifier_add = hvc_opal_hvsi_open,
 +      .notifier_del = hvc_opal_hvsi_close,
 +      .notifier_hangup = hvc_opal_hvsi_hangup,
 +      .tiocmget = hvc_opal_hvsi_tiocmget,
 +      .tiocmset = hvc_opal_hvsi_tiocmset,
 +};
 +
 +static int __devinit hvc_opal_probe(struct platform_device *dev)
 +{
 +      const struct hv_ops *ops;
 +      struct hvc_struct *hp;
 +      struct hvc_opal_priv *pv;
 +      hv_protocol_t proto;
 +      unsigned int termno, boot = 0;
 +      const __be32 *reg;
 +
 +      if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
 +              proto = HV_PROTOCOL_RAW;
 +              ops = &hvc_opal_raw_ops;
 +      } else if (of_device_is_compatible(dev->dev.of_node,
 +                                         "ibm,opal-console-hvsi")) {
 +              proto = HV_PROTOCOL_HVSI;
 +              ops = &hvc_opal_hvsi_ops;
 +      } else {
 +              pr_err("hvc_opal: Unkown protocol for %s\n",
 +                     dev->dev.of_node->full_name);
 +              return -ENXIO;
 +      }
 +
 +      reg = of_get_property(dev->dev.of_node, "reg", NULL);
 +      termno = reg ? be32_to_cpup(reg) : 0;
 +
 +      /* Is it our boot one ? */
 +      if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
 +              pv = hvc_opal_privs[termno];
 +              boot = 1;
 +      } else if (hvc_opal_privs[termno] == NULL) {
 +              pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
 +              if (!pv)
 +                      return -ENOMEM;
 +              pv->proto = proto;
 +              hvc_opal_privs[termno] = pv;
 +              if (proto == HV_PROTOCOL_HVSI)
 +                      hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
 +                                   termno, 0);
 +
 +              /* Instantiate now to establish a mapping index==vtermno */
 +              hvc_instantiate(termno, termno, ops);
 +      } else {
 +              pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n",
 +                     dev->dev.of_node->full_name, termno);
 +              return -ENXIO;
 +      }
 +
 +      pr_info("hvc%d: %s protocol on %s%s\n", termno,
 +              proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
 +              dev->dev.of_node->full_name,
 +              boot ? " (boot console)" : "");
 +
 +      /* We don't do IRQ yet */
 +      hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
 +      if (IS_ERR(hp))
 +              return PTR_ERR(hp);
 +      dev_set_drvdata(&dev->dev, hp);
 +
 +      return 0;
 +}
 +
 +static int __devexit hvc_opal_remove(struct platform_device *dev)
 +{
 +      struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
 +      int rc, termno;
 +
 +      termno = hp->vtermno;
 +      rc = hvc_remove(hp);
 +      if (rc == 0) {
 +              if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
 +                      kfree(hvc_opal_privs[termno]);
 +              hvc_opal_privs[termno] = NULL;
 +      }
 +      return rc;
 +}
 +
 +static struct platform_driver hvc_opal_driver = {
 +      .probe          = hvc_opal_probe,
 +      .remove         = __devexit_p(hvc_opal_remove),
 +      .driver         = {
 +              .name   = hvc_opal_name,
 +              .owner  = THIS_MODULE,
 +              .of_match_table = hvc_opal_match,
 +      }
 +};
 +
 +static int __init hvc_opal_init(void)
 +{
 +      if (!firmware_has_feature(FW_FEATURE_OPAL))
 +              return -ENODEV;
 +
 +      /* Register as a vio device to receive callbacks */
 +      return platform_driver_register(&hvc_opal_driver);
 +}
 +module_init(hvc_opal_init);
 +
 +static void __exit hvc_opal_exit(void)
 +{
 +      platform_driver_unregister(&hvc_opal_driver);
 +}
 +module_exit(hvc_opal_exit);
 +
 +static void udbg_opal_putc(char c)
 +{
 +      unsigned int termno = hvc_opal_boot_termno;
 +      int count = -1;
 +
 +      if (c == '\n')
 +              udbg_opal_putc('\r');
 +
 +      do {
 +              switch(hvc_opal_boot_priv.proto) {
 +              case HV_PROTOCOL_RAW:
 +                      count = opal_put_chars(termno, &c, 1);
 +                      break;
 +              case HV_PROTOCOL_HVSI:
 +                      count = hvc_opal_hvsi_put_chars(termno, &c, 1);
 +                      break;
 +              }
 +      } while(count == 0 || count == -EAGAIN);
 +}
 +
 +static int udbg_opal_getc_poll(void)
 +{
 +      unsigned int termno = hvc_opal_boot_termno;
 +      int rc = 0;
 +      char c;
 +
 +      switch(hvc_opal_boot_priv.proto) {
 +      case HV_PROTOCOL_RAW:
 +              rc = opal_get_chars(termno, &c, 1);
 +              break;
 +      case HV_PROTOCOL_HVSI:
 +              rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
 +              break;
 +      }
 +      if (!rc)
 +              return -1;
 +      return c;
 +}
 +
 +static int udbg_opal_getc(void)
 +{
 +      int ch;
 +      for (;;) {
 +              ch = udbg_opal_getc_poll();
 +              if (ch == -1) {
 +                      /* This shouldn't be needed...but... */
 +                      volatile unsigned long delay;
 +                      for (delay=0; delay < 2000000; delay++)
 +                              ;
 +              } else {
 +                      return ch;
 +              }
 +      }
 +}
 +
 +static void udbg_init_opal_common(void)
 +{
 +      udbg_putc = udbg_opal_putc;
 +      udbg_getc = udbg_opal_getc;
 +      udbg_getc_poll = udbg_opal_getc_poll;
 +      tb_ticks_per_usec = 0x200; /* Make udelay not suck */
 +}
 +
 +void __init hvc_opal_init_early(void)
 +{
 +      struct device_node *stdout_node = NULL;
 +      const u32 *termno;
 +      const char *name = NULL;
 +      const struct hv_ops *ops;
 +      u32 index;
 +
 +      /* find the boot console from /chosen/stdout */
 +      if (of_chosen)
 +              name = of_get_property(of_chosen, "linux,stdout-path", NULL);
 +      if (name) {
 +              stdout_node = of_find_node_by_path(name);
 +              if (!stdout_node) {
 +                      pr_err("hvc_opal: Failed to locate default console!\n");
 +                      return;
 +              }
 +      } else {
 +              struct device_node *opal, *np;
 +
 +              /* Current OPAL takeover doesn't provide the stdout
 +               * path, so we hard-wire it
 +               */
 +              opal = of_find_node_by_path("/ibm,opal/consoles");
 +              if (opal)
 +                      pr_devel("hvc_opal: Found consoles in new location\n");
 +              if (!opal) {
 +                      opal = of_find_node_by_path("/ibm,opal");
 +                      if (opal)
 +                              pr_devel("hvc_opal: "
 +                                       "Found consoles in old location\n");
 +              }
 +              if (!opal)
 +                      return;
 +              for_each_child_of_node(opal, np) {
 +                      if (!strcmp(np->name, "serial")) {
 +                              stdout_node = np;
 +                              break;
 +                      }
 +              }
 +              of_node_put(opal);
 +      }
 +      if (!stdout_node)
 +              return;
 +      termno = of_get_property(stdout_node, "reg", NULL);
 +      index = termno ? *termno : 0;
 +      if (index >= MAX_NR_HVC_CONSOLES)
 +              return;
 +      hvc_opal_privs[index] = &hvc_opal_boot_priv;
 +
 +      /* Check the protocol */
 +      if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) {
 +              hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
 +              ops = &hvc_opal_raw_ops;
 +              pr_devel("hvc_opal: Found RAW console\n");
 +      }
 +      else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) {
 +              hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
 +              ops = &hvc_opal_hvsi_ops;
 +              hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
 +                           opal_put_chars, index, 1);
 +              /* HVSI, perform the handshake now */
 +              hvsilib_establish(&hvc_opal_boot_priv.hvsi);
 +              pr_devel("hvc_opal: Found HVSI console\n");
 +      } else
 +              goto out;
 +      hvc_opal_boot_termno = index;
 +      udbg_init_opal_common();
 +      add_preferred_console("hvc", index, NULL);
 +      hvc_instantiate(index, index, ops);
 +out:
 +      of_node_put(stdout_node);
 +}
 +
 +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
 +void __init udbg_init_debug_opal(void)
 +{
 +      u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
 +      hvc_opal_privs[index] = &hvc_opal_boot_priv;
 +      hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
 +      hvc_opal_boot_termno = index;
 +      udbg_init_opal_common();
 +}
 +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */
 +
 +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI
 +void __init udbg_init_debug_opal_hvsi(void)
 +{
 +      u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
 +      hvc_opal_privs[index] = &hvc_opal_boot_priv;
 +      hvc_opal_boot_termno = index;
 +      udbg_init_opal_common();
 +      hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
 +                   index, 1);
 +      hvsilib_establish(&hvc_opal_boot_priv.hvsi);
 +}
 +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */
Simple merge
Simple merge
Simple merge
index 443e4fb9b8f33e66d01e00d7a808e68a54699622,0000000000000000000000000000000000000000..20d0503f318b2b8331a7c723f8e78e9520b966cd
mode 100644,000000..100644
--- /dev/null
@@@ -1,467 -1,0 +1,468 @@@
 +/**
 + * core.c - DesignWare USB3 DRD Controller Core file
 + *
 + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 + * All rights reserved.
 + *
 + * Authors: Felipe Balbi <balbi@ti.com>,
 + *        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 + *
 + * Redistribution and use in source and binary forms, with or without
 + * modification, are permitted provided that the following conditions
 + * are met:
 + * 1. Redistributions of source code must retain the above copyright
 + *    notice, this list of conditions, and the following disclaimer,
 + *    without modification.
 + * 2. Redistributions in binary form must reproduce the above copyright
 + *    notice, this list of conditions and the following disclaimer in the
 + *    documentation and/or other materials provided with the distribution.
 + * 3. The names of the above-listed copyright holders may not be used
 + *    to endorse or promote products derived from this software without
 + *    specific prior written permission.
 + *
 + * ALTERNATIVELY, this software may be distributed under the terms of the
 + * GNU General Public License ("GPL") version 2, as published by the Free
 + * Software Foundation.
 + *
 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 + */
 +
 +#include <linux/kernel.h>
 +#include <linux/slab.h>
 +#include <linux/spinlock.h>
 +#include <linux/platform_device.h>
 +#include <linux/pm_runtime.h>
 +#include <linux/interrupt.h>
 +#include <linux/ioport.h>
 +#include <linux/io.h>
 +#include <linux/list.h>
 +#include <linux/delay.h>
 +#include <linux/dma-mapping.h>
 +
 +#include <linux/usb/ch9.h>
 +#include <linux/usb/gadget.h>
++#include <linux/module.h>
 +
 +#include "core.h"
 +#include "gadget.h"
 +#include "io.h"
 +
 +#include "debug.h"
 +
 +/**
 + * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 + * @dwc: pointer to our context structure
 + */
 +static void dwc3_core_soft_reset(struct dwc3 *dwc)
 +{
 +      u32             reg;
 +
 +      /* Before Resetting PHY, put Core in Reset */
 +      reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 +      reg |= DWC3_GCTL_CORESOFTRESET;
 +      dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +
 +      /* Assert USB3 PHY reset */
 +      reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 +      reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
 +      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +
 +      /* Assert USB2 PHY reset */
 +      reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 +      reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
 +      dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 +
 +      mdelay(100);
 +
 +      /* Clear USB3 PHY reset */
 +      reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 +      reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
 +      dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 +
 +      /* Clear USB2 PHY reset */
 +      reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 +      reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
 +      dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 +
 +      /* After PHYs are stable we can take Core out of reset state */
 +      reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 +      reg &= ~DWC3_GCTL_CORESOFTRESET;
 +      dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 +}
 +
 +/**
 + * dwc3_free_one_event_buffer - Frees one event buffer
 + * @dwc: Pointer to our controller context structure
 + * @evt: Pointer to event buffer to be freed
 + */
 +static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
 +              struct dwc3_event_buffer *evt)
 +{
 +      dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
 +      kfree(evt);
 +}
 +
 +/**
 + * dwc3_alloc_one_event_buffer - Allocated one event buffer structure
 + * @dwc: Pointer to our controller context structure
 + * @length: size of the event buffer
 + *
 + * Returns a pointer to the allocated event buffer structure on succes
 + * otherwise ERR_PTR(errno).
 + */
 +static struct dwc3_event_buffer *__devinit
 +dwc3_alloc_one_event_buffer(struct dwc3 *dwc, unsigned length)
 +{
 +      struct dwc3_event_buffer        *evt;
 +
 +      evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 +      if (!evt)
 +              return ERR_PTR(-ENOMEM);
 +
 +      evt->dwc        = dwc;
 +      evt->length     = length;
 +      evt->buf        = dma_alloc_coherent(dwc->dev, length,
 +                      &evt->dma, GFP_KERNEL);
 +      if (!evt->buf) {
 +              kfree(evt);
 +              return ERR_PTR(-ENOMEM);
 +      }
 +
 +      return evt;
 +}
 +
 +/**
 + * dwc3_free_event_buffers - frees all allocated event buffers
 + * @dwc: Pointer to our controller context structure
 + */
 +static void dwc3_free_event_buffers(struct dwc3 *dwc)
 +{
 +      struct dwc3_event_buffer        *evt;
 +      int i;
 +
 +      for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
 +              evt = dwc->ev_buffs[i];
 +              if (evt) {
 +                      dwc3_free_one_event_buffer(dwc, evt);
 +                      dwc->ev_buffs[i] = NULL;
 +              }
 +      }
 +}
 +
 +/**
 + * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 + * @dwc: Pointer to out controller context structure
 + * @num: number of event buffers to allocate
 + * @length: size of event buffer
 + *
 + * Returns 0 on success otherwise negative errno. In error the case, dwc
 + * may contain some buffers allocated but not all which were requested.
 + */
 +static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
 +              unsigned length)
 +{
 +      int                     i;
 +
 +      for (i = 0; i < num; i++) {
 +              struct dwc3_event_buffer        *evt;
 +
 +              evt = dwc3_alloc_one_event_buffer(dwc, length);
 +              if (IS_ERR(evt)) {
 +                      dev_err(dwc->dev, "can't allocate event buffer\n");
 +                      return PTR_ERR(evt);
 +              }
 +              dwc->ev_buffs[i] = evt;
 +      }
 +
 +      return 0;
 +}
 +
 +/**
 + * dwc3_event_buffers_setup - setup our allocated event buffers
 + * @dwc: Pointer to out controller context structure
 + *
 + * Returns 0 on success otherwise negative errno.
 + */
 +static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
 +{
 +      struct dwc3_event_buffer        *evt;
 +      int                             n;
 +
 +      for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
 +              evt = dwc->ev_buffs[n];
 +              dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
 +                              evt->buf, (unsigned long long) evt->dma,
 +                              evt->length);
 +
 +              dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
 +                              lower_32_bits(evt->dma));
 +              dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
 +                              upper_32_bits(evt->dma));
 +              dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
 +                              evt->length & 0xffff);
 +              dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
 +      }
 +
 +      return 0;
 +}
 +
 +static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
 +{
 +      struct dwc3_event_buffer        *evt;
 +      int                             n;
 +
 +      for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
 +              evt = dwc->ev_buffs[n];
 +              dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
 +              dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
 +              dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
 +              dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
 +      }
 +}
 +
 +/**
 + * dwc3_core_init - Low-level initialization of DWC3 Core
 + * @dwc: Pointer to our controller context structure
 + *
 + * Returns 0 on success otherwise negative errno.
 + */
 +static int __devinit dwc3_core_init(struct dwc3 *dwc)
 +{
 +      unsigned long           timeout;
 +      u32                     reg;
 +      int                     ret;
 +
 +      dwc3_core_soft_reset(dwc);
 +
 +      /* issue device SoftReset too */
 +      timeout = jiffies + msecs_to_jiffies(500);
 +      dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
 +      do {
 +              reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 +              if (!(reg & DWC3_DCTL_CSFTRST))
 +                      break;
 +
 +              if (time_after(jiffies, timeout)) {
 +                      dev_err(dwc->dev, "Reset Timed Out\n");
 +                      ret = -ETIMEDOUT;
 +                      goto err0;
 +              }
 +
 +              cpu_relax();
 +      } while (true);
 +
 +      reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
 +      /* This should read as U3 followed by revision number */
 +      if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
 +              dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
 +              ret = -ENODEV;
 +              goto err0;
 +      }
 +
 +      dwc->revision = reg & DWC3_GSNPSREV_MASK;
 +
 +      ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
 +                      DWC3_EVENT_BUFFERS_SIZE);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to allocate event buffers\n");
 +              ret = -ENOMEM;
 +              goto err1;
 +      }
 +
 +      ret = dwc3_event_buffers_setup(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "failed to setup event buffers\n");
 +              goto err1;
 +      }
 +
 +      return 0;
 +
 +err1:
 +      dwc3_free_event_buffers(dwc);
 +
 +err0:
 +      return ret;
 +}
 +
 +static void dwc3_core_exit(struct dwc3 *dwc)
 +{
 +      dwc3_event_buffers_cleanup(dwc);
 +      dwc3_free_event_buffers(dwc);
 +}
 +
 +#define DWC3_ALIGN_MASK               (16 - 1)
 +
 +static int __devinit dwc3_probe(struct platform_device *pdev)
 +{
 +      const struct platform_device_id *id = platform_get_device_id(pdev);
 +      struct resource         *res;
 +      struct dwc3             *dwc;
 +      void __iomem            *regs;
 +      unsigned int            features = id->driver_data;
 +      int                     ret = -ENOMEM;
 +      int                     irq;
 +      void                    *mem;
 +
 +      mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
 +      if (!mem) {
 +              dev_err(&pdev->dev, "not enough memory\n");
 +              goto err0;
 +      }
 +      dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
 +      dwc->mem = mem;
 +
 +      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 +      if (!res) {
 +              dev_err(&pdev->dev, "missing resource\n");
 +              goto err1;
 +      }
 +
 +      res = request_mem_region(res->start, resource_size(res),
 +                      dev_name(&pdev->dev));
 +      if (!res) {
 +              dev_err(&pdev->dev, "can't request mem region\n");
 +              goto err1;
 +      }
 +
 +      regs = ioremap(res->start, resource_size(res));
 +      if (!regs) {
 +              dev_err(&pdev->dev, "ioremap failed\n");
 +              goto err2;
 +      }
 +
 +      irq = platform_get_irq(pdev, 0);
 +      if (irq < 0) {
 +              dev_err(&pdev->dev, "missing IRQ\n");
 +              goto err3;
 +      }
 +
 +      spin_lock_init(&dwc->lock);
 +      platform_set_drvdata(pdev, dwc);
 +
 +      dwc->regs       = regs;
 +      dwc->regs_size  = resource_size(res);
 +      dwc->dev        = &pdev->dev;
 +      dwc->irq        = irq;
 +
 +      pm_runtime_enable(&pdev->dev);
 +      pm_runtime_get_sync(&pdev->dev);
 +      pm_runtime_forbid(&pdev->dev);
 +
 +      ret = dwc3_core_init(dwc);
 +      if (ret) {
 +              dev_err(&pdev->dev, "failed to initialize core\n");
 +              goto err3;
 +      }
 +
 +      if (features & DWC3_HAS_PERIPHERAL) {
 +              ret = dwc3_gadget_init(dwc);
 +              if (ret) {
 +                      dev_err(&pdev->dev, "failed to initialized gadget\n");
 +                      goto err4;
 +              }
 +      }
 +
 +      ret = dwc3_debugfs_init(dwc);
 +      if (ret) {
 +              dev_err(&pdev->dev, "failed to initialize debugfs\n");
 +              goto err5;
 +      }
 +
 +      pm_runtime_allow(&pdev->dev);
 +
 +      return 0;
 +
 +err5:
 +      if (features & DWC3_HAS_PERIPHERAL)
 +              dwc3_gadget_exit(dwc);
 +
 +err4:
 +      dwc3_core_exit(dwc);
 +
 +err3:
 +      iounmap(regs);
 +
 +err2:
 +      release_mem_region(res->start, resource_size(res));
 +
 +err1:
 +      kfree(dwc->mem);
 +
 +err0:
 +      return ret;
 +}
 +
 +static int __devexit dwc3_remove(struct platform_device *pdev)
 +{
 +      const struct platform_device_id *id = platform_get_device_id(pdev);
 +      struct dwc3     *dwc = platform_get_drvdata(pdev);
 +      struct resource *res;
 +      unsigned int    features = id->driver_data;
 +
 +      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 +
 +      pm_runtime_put(&pdev->dev);
 +      pm_runtime_disable(&pdev->dev);
 +
 +      dwc3_debugfs_exit(dwc);
 +
 +      if (features & DWC3_HAS_PERIPHERAL)
 +              dwc3_gadget_exit(dwc);
 +
 +      dwc3_core_exit(dwc);
 +      release_mem_region(res->start, resource_size(res));
 +      iounmap(dwc->regs);
 +      kfree(dwc->mem);
 +
 +      return 0;
 +}
 +
 +static const struct platform_device_id dwc3_id_table[] __devinitconst = {
 +      {
 +              .name   = "dwc3-omap",
 +              .driver_data = (DWC3_HAS_PERIPHERAL
 +                      | DWC3_HAS_XHCI
 +                      | DWC3_HAS_OTG),
 +      },
 +      {
 +              .name   = "dwc3-pci",
 +              .driver_data = DWC3_HAS_PERIPHERAL,
 +      },
 +      {  },   /* Terminating Entry */
 +};
 +MODULE_DEVICE_TABLE(platform, dwc3_id_table);
 +
 +static struct platform_driver dwc3_driver = {
 +      .probe          = dwc3_probe,
 +      .remove         = __devexit_p(dwc3_remove),
 +      .driver         = {
 +              .name   = "dwc3",
 +      },
 +      .id_table       = dwc3_id_table,
 +};
 +
 +MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
 +MODULE_LICENSE("Dual BSD/GPL");
 +MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
 +
 +static int __devinit dwc3_init(void)
 +{
 +      return platform_driver_register(&dwc3_driver);
 +}
 +module_init(dwc3_init);
 +
 +static void __exit dwc3_exit(void)
 +{
 +      platform_driver_unregister(&dwc3_driver);
 +}
 +module_exit(dwc3_exit);
index 08fffe6d1a9ed510a1c7101df448fd771dc925de,0000000000000000000000000000000000000000..b4a654e571e143628aa2c0355f74824f0b7ffaf4
mode 100644,000000..100644
--- /dev/null
@@@ -1,410 -1,0 +1,411 @@@
 +/**
 + * dwc3-omap.c - OMAP Specific Glue layer
 + *
 + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 + * All rights reserved.
 + *
 + * Authors: Felipe Balbi <balbi@ti.com>,
 + *        Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 + *
 + * Redistribution and use in source and binary forms, with or without
 + * modification, are permitted provided that the following conditions
 + * are met:
 + * 1. Redistributions of source code must retain the above copyright
 + *    notice, this list of conditions, and the following disclaimer,
 + *    without modification.
 + * 2. Redistributions in binary form must reproduce the above copyright
 + *    notice, this list of conditions and the following disclaimer in the
 + *    documentation and/or other materials provided with the distribution.
 + * 3. The names of the above-listed copyright holders may not be used
 + *    to endorse or promote products derived from this software without
 + *    specific prior written permission.
 + *
 + * ALTERNATIVELY, this software may be distributed under the terms of the
 + * GNU General Public License ("GPL") version 2, as published by the Free
 + * Software Foundation.
 + *
 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 + */
 +
 +#include <linux/kernel.h>
 +#include <linux/slab.h>
 +#include <linux/interrupt.h>
 +#include <linux/spinlock.h>
 +#include <linux/platform_device.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/ioport.h>
 +#include <linux/io.h>
++#include <linux/module.h>
 +
 +#include "io.h"
 +
 +/*
 + * All these registers belong to OMAP's Wrapper around the
 + * DesignWare USB3 Core.
 + */
 +
 +#define USBOTGSS_REVISION                     0x0000
 +#define USBOTGSS_SYSCONFIG                    0x0010
 +#define USBOTGSS_IRQ_EOI                      0x0020
 +#define USBOTGSS_IRQSTATUS_RAW_0              0x0024
 +#define USBOTGSS_IRQSTATUS_0                  0x0028
 +#define USBOTGSS_IRQENABLE_SET_0              0x002c
 +#define USBOTGSS_IRQENABLE_CLR_0              0x0030
 +#define USBOTGSS_IRQSTATUS_RAW_1              0x0034
 +#define USBOTGSS_IRQSTATUS_1                  0x0038
 +#define USBOTGSS_IRQENABLE_SET_1              0x003c
 +#define USBOTGSS_IRQENABLE_CLR_1              0x0040
 +#define USBOTGSS_UTMI_OTG_CTRL                        0x0080
 +#define USBOTGSS_UTMI_OTG_STATUS              0x0084
 +#define USBOTGSS_MMRAM_OFFSET                 0x0100
 +#define USBOTGSS_FLADJ                                0x0104
 +#define USBOTGSS_DEBUG_CFG                    0x0108
 +#define USBOTGSS_DEBUG_DATA                   0x010c
 +
 +/* SYSCONFIG REGISTER */
 +#define USBOTGSS_SYSCONFIG_DMADISABLE         (1 << 16)
 +#define USBOTGSS_SYSCONFIG_STANDBYMODE(x)     ((x) << 4)
 +#define USBOTGSS_SYSCONFIG_IDLEMODE(x)                ((x) << 2)
 +
 +/* IRQ_EOI REGISTER */
 +#define USBOTGSS_IRQ_EOI_LINE_NUMBER          (1 << 0)
 +
 +/* IRQS0 BITS */
 +#define USBOTGSS_IRQO_COREIRQ_ST              (1 << 0)
 +
 +/* IRQ1 BITS */
 +#define USBOTGSS_IRQ1_DMADISABLECLR           (1 << 17)
 +#define USBOTGSS_IRQ1_OEVT                    (1 << 16)
 +#define USBOTGSS_IRQ1_DRVVBUS_RISE            (1 << 13)
 +#define USBOTGSS_IRQ1_CHRGVBUS_RISE           (1 << 12)
 +#define USBOTGSS_IRQ1_DISCHRGVBUS_RISE                (1 << 11)
 +#define USBOTGSS_IRQ1_IDPULLUP_RISE           (1 << 8)
 +#define USBOTGSS_IRQ1_DRVVBUS_FALL            (1 << 5)
 +#define USBOTGSS_IRQ1_CHRGVBUS_FALL           (1 << 4)
 +#define USBOTGSS_IRQ1_DISCHRGVBUS_FALL                (1 << 3)
 +#define USBOTGSS_IRQ1_IDPULLUP_FALL           (1 << 0)
 +
 +/* UTMI_OTG_CTRL REGISTER */
 +#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS                (1 << 5)
 +#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS               (1 << 4)
 +#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS    (1 << 3)
 +#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP               (1 << 0)
 +
 +/* UTMI_OTG_STATUS REGISTER */
 +#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE      (1 << 31)
 +#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9)
 +#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
 +#define USBOTGSS_UTMI_OTG_STATUS_IDDIG                (1 << 4)
 +#define USBOTGSS_UTMI_OTG_STATUS_SESSEND      (1 << 3)
 +#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID    (1 << 2)
 +#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID    (1 << 1)
 +
 +struct dwc3_omap {
 +      /* device lock */
 +      spinlock_t              lock;
 +
 +      struct platform_device  *dwc3;
 +      struct device           *dev;
 +
 +      int                     irq;
 +      void __iomem            *base;
 +
 +      void                    *context;
 +      u32                     resource_size;
 +
 +      u32                     dma_status:1;
 +};
 +
 +#ifdef CONFIG_PM
 +static int dwc3_omap_suspend(struct device *dev)
 +{
 +      struct dwc3_omap        *omap = dev_get_drvdata(dev);
 +
 +      memcpy_fromio(omap->context, omap->base, omap->resource_size);
 +
 +      return 0;
 +}
 +
 +static int dwc3_omap_resume(struct device *dev)
 +{
 +      struct dwc3_omap        *omap = dev_get_drvdata(dev);
 +
 +      memcpy_toio(omap->base, omap->context, omap->resource_size);
 +
 +      return 0;
 +}
 +
 +static int dwc3_omap_idle(struct device *dev)
 +{
 +      struct dwc3_omap        *omap = dev_get_drvdata(dev);
 +      u32                     reg;
 +
 +      /* stop DMA Engine */
 +      reg = dwc3_readl(omap->base, USBOTGSS_SYSCONFIG);
 +      reg &= ~(USBOTGSS_SYSCONFIG_DMADISABLE);
 +      dwc3_writel(omap->base, USBOTGSS_SYSCONFIG, reg);
 +
 +      return 0;
 +}
 +
 +static UNIVERSAL_DEV_PM_OPS(dwc3_omap_pm_ops, dwc3_omap_suspend,
 +              dwc3_omap_resume, dwc3_omap_idle);
 +
 +#define DEV_PM_OPS    (&dwc3_omap_pm_ops)
 +#else
 +#define DEV_PM_OPS    NULL
 +#endif
 +
 +static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
 +{
 +      struct dwc3_omap        *omap = _omap;
 +      u32                     reg;
 +      u32                     ctrl;
 +
 +      spin_lock(&omap->lock);
 +
 +      reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_1);
 +      ctrl = dwc3_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL);
 +
 +      if (reg & USBOTGSS_IRQ1_DMADISABLECLR) {
 +              dev_dbg(omap->base, "DMA Disable was Cleared\n");
 +              omap->dma_status = false;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_OEVT)
 +              dev_dbg(omap->base, "OTG Event\n");
 +
 +      if (reg & USBOTGSS_IRQ1_DRVVBUS_RISE) {
 +              dev_dbg(omap->base, "DRVVBUS Rise\n");
 +              ctrl |= USBOTGSS_UTMI_OTG_CTRL_DRVVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_CHRGVBUS_RISE) {
 +              dev_dbg(omap->base, "CHRGVBUS Rise\n");
 +              ctrl |= USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_RISE) {
 +              dev_dbg(omap->base, "DISCHRGVBUS Rise\n");
 +              ctrl |= USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_IDPULLUP_RISE) {
 +              dev_dbg(omap->base, "IDPULLUP Rise\n");
 +              ctrl |= USBOTGSS_UTMI_OTG_CTRL_IDPULLUP;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_DRVVBUS_FALL) {
 +              dev_dbg(omap->base, "DRVVBUS Fall\n");
 +              ctrl &= ~USBOTGSS_UTMI_OTG_CTRL_DRVVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_CHRGVBUS_FALL) {
 +              dev_dbg(omap->base, "CHRGVBUS Fall\n");
 +              ctrl &= ~USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_FALL) {
 +              dev_dbg(omap->base, "DISCHRGVBUS Fall\n");
 +              ctrl &= ~USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS;
 +      }
 +
 +      if (reg & USBOTGSS_IRQ1_IDPULLUP_FALL) {
 +              dev_dbg(omap->base, "IDPULLUP Fall\n");
 +              ctrl &= ~USBOTGSS_UTMI_OTG_CTRL_IDPULLUP;
 +      }
 +
 +      dwc3_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL, ctrl);
 +
 +      spin_unlock(&omap->lock);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static int __devinit dwc3_omap_probe(struct platform_device *pdev)
 +{
 +      struct platform_device  *dwc3;
 +      struct dwc3_omap        *omap;
 +      struct resource         *res;
 +
 +      int                     ret = -ENOMEM;
 +      int                     irq;
 +
 +      u32                     reg;
 +
 +      void __iomem            *base;
 +      void                    *context;
 +
 +      omap = kzalloc(sizeof(*omap), GFP_KERNEL);
 +      if (!omap) {
 +              dev_err(&pdev->dev, "not enough memory\n");
 +              goto err0;
 +      }
 +
 +      platform_set_drvdata(pdev, omap);
 +
 +      irq = platform_get_irq(pdev, 1);
 +      if (irq < 0) {
 +              dev_err(&pdev->dev, "missing IRQ resource\n");
 +              ret = -EINVAL;
 +              goto err1;
 +      }
 +
 +      res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 +      if (!res) {
 +              dev_err(&pdev->dev, "missing memory base resource\n");
 +              ret = -EINVAL;
 +              goto err1;
 +      }
 +
 +      base = ioremap_nocache(res->start, resource_size(res));
 +      if (!base) {
 +              dev_err(&pdev->dev, "ioremap failed\n");
 +              goto err1;
 +      }
 +
 +      dwc3 = platform_device_alloc("dwc3-omap", -1);
 +      if (!dwc3) {
 +              dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
 +              goto err2;
 +      }
 +
 +      context = kzalloc(resource_size(res), GFP_KERNEL);
 +      if (!context) {
 +              dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
 +              goto err3;
 +      }
 +
 +      spin_lock_init(&omap->lock);
 +      dma_set_coherent_mask(&dwc3->dev, pdev->dev.coherent_dma_mask);
 +
 +      dwc3->dev.parent = &pdev->dev;
 +      dwc3->dev.dma_mask = pdev->dev.dma_mask;
 +      dwc3->dev.dma_parms = pdev->dev.dma_parms;
 +      omap->resource_size = resource_size(res);
 +      omap->context   = context;
 +      omap->dev       = &pdev->dev;
 +      omap->irq       = irq;
 +      omap->base      = base;
 +      omap->dwc3      = dwc3;
 +
 +      /* check the DMA Status */
 +      reg = dwc3_readl(omap->base, USBOTGSS_SYSCONFIG);
 +      omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
 +
 +      ret = request_irq(omap->irq, dwc3_omap_interrupt, 0,
 +                      "dwc3-wrapper", omap);
 +      if (ret) {
 +              dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
 +                              omap->irq, ret);
 +              goto err4;
 +      }
 +
 +      /* enable all IRQs */
 +      dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, 0x01);
 +
 +      reg = (USBOTGSS_IRQ1_DMADISABLECLR |
 +                      USBOTGSS_IRQ1_OEVT |
 +                      USBOTGSS_IRQ1_DRVVBUS_RISE |
 +                      USBOTGSS_IRQ1_CHRGVBUS_RISE |
 +                      USBOTGSS_IRQ1_DISCHRGVBUS_RISE |
 +                      USBOTGSS_IRQ1_IDPULLUP_RISE |
 +                      USBOTGSS_IRQ1_DRVVBUS_FALL |
 +                      USBOTGSS_IRQ1_CHRGVBUS_FALL |
 +                      USBOTGSS_IRQ1_DISCHRGVBUS_FALL |
 +                      USBOTGSS_IRQ1_IDPULLUP_FALL);
 +
 +      dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
 +
 +      ret = platform_device_add_resources(dwc3, pdev->resource,
 +                      pdev->num_resources);
 +      if (ret) {
 +              dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
 +              goto err5;
 +      }
 +
 +      ret = platform_device_add(dwc3);
 +      if (ret) {
 +              dev_err(&pdev->dev, "failed to register dwc3 device\n");
 +              goto err5;
 +      }
 +
 +      return 0;
 +
 +err5:
 +      free_irq(omap->irq, omap);
 +
 +err4:
 +      kfree(omap->context);
 +
 +err3:
 +      platform_device_put(dwc3);
 +
 +err2:
 +      iounmap(base);
 +
 +err1:
 +      kfree(omap);
 +
 +err0:
 +      return ret;
 +}
 +
 +static int __devexit dwc3_omap_remove(struct platform_device *pdev)
 +{
 +      struct dwc3_omap        *omap = platform_get_drvdata(pdev);
 +
 +      platform_device_unregister(omap->dwc3);
 +
 +      free_irq(omap->irq, omap);
 +      iounmap(omap->base);
 +
 +      kfree(omap->context);
 +      kfree(omap);
 +
 +      return 0;
 +}
 +
 +static const struct of_device_id of_dwc3_matach[] = {
 +      {
 +              "ti,dwc3",
 +      },
 +      { },
 +};
 +MODULE_DEVICE_TABLE(of, of_dwc3_matach);
 +
 +static struct platform_driver dwc3_omap_driver = {
 +      .probe          = dwc3_omap_probe,
 +      .remove         = __devexit_p(dwc3_omap_remove),
 +      .driver         = {
 +              .name   = "omap-dwc3",
 +              .pm     = DEV_PM_OPS,
 +              .of_match_table = of_dwc3_matach,
 +      },
 +};
 +
 +MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
 +MODULE_LICENSE("Dual BSD/GPL");
 +MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
 +
 +static int __devinit dwc3_omap_init(void)
 +{
 +      return platform_driver_register(&dwc3_omap_driver);
 +}
 +module_init(dwc3_omap_init);
 +
 +static void __exit dwc3_omap_exit(void)
 +{
 +      platform_driver_unregister(&dwc3_omap_driver);
 +}
 +module_exit(dwc3_omap_exit);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 97ab8bac24945545630cf648eacdcab1d8c9d325,499d5489201754d1c99a1166395d564363ca3b7f..4636f9dc30b00088991179c8b8005cfa9e8f6507
@@@ -23,8 -23,8 +23,9 @@@
  #include <linux/console.h>
  #include <linux/backlight.h>
  #include <linux/gpio.h>
+ #include <linux/module.h>
  #include <video/sh_mobile_lcdc.h>
 +#include <video/sh_mobile_meram.h>
  #include <linux/atomic.h>
  
  #include "sh_mobile_lcdcfb.h"
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc fs/exofs/ore.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc fs/nfs/write.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index ace51af4369f668ea647f47c2d87f76d4478b121,1ceff5ae9d31c2df27b1e6bcc46df342a5b43125..75f53f874b24a0c0abb790f501f2f60ace48e17e
  #include <linux/device.h>
  #include <linux/uio.h>
  #include <linux/dma-direction.h>
 +#include <linux/scatterlist.h>
+ #include <linux/bitmap.h>
+ #include <asm/page.h>
  
 -struct scatterlist;
 -
  /**
   * typedef dma_cookie_t - an opaque DMA cookie
   *
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/async.c
Simple merge
Simple merge
diff --cc kernel/cred.c
Simple merge
Simple merge
diff --cc kernel/fork.c
Simple merge
diff --cc kernel/futex.c
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/pid.c
Simple merge
Simple merge
Simple merge
index 1c1797dd1d1d3f05c3260f47e0f6c22efc1c815c,e3969fd2e1a21e4b4cbb183c58b4126c8cd33c23..2c0a65e276264f369e7e4775163f1a4b3f24e729
  #include <linux/kernel.h>
  
  #include <linux/uaccess.h>
+ #include <linux/export.h>
  
  /*
 - * locking rule: all changes to requests or notifiers lists
 + * locking rule: all changes to constraints or notifiers lists
   * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
   * held, taken with _irqsave.  One lock to rule them all
   */
Simple merge
diff --cc kernel/ptrace.c
Simple merge
index 1a491edd9c09b39a42afb90561b230c00bef32d5,3c82e32d67f99bfb0216880d04c3a82dbbc625a9..f63ea4022d785e68a1565bbc6a28615e0a99740b
  #include <linux/notifier.h>
  #include <linux/cpu.h>
  #include <linux/mutex.h>
- #include <linux/module.h>
+ #include <linux/export.h>
  #include <linux/hardirq.h>
  
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/rcu.h>
 +
 +#include "rcu.h"
 +
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  static struct lock_class_key rcu_lock_key;
  struct lockdep_map rcu_lock_map =
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/sched.c
Simple merge
Simple merge
diff --cc kernel/signal.c
Simple merge
Simple merge
diff --cc kernel/sys.c
Simple merge
diff --cc kernel/time.c
Simple merge
Simple merge
Simple merge
diff --cc lib/dma-debug.c
Simple merge
diff --cc mm/memcontrol.c
Simple merge
diff --cc mm/swapfile.c
Simple merge
diff --cc net/802/garp.c
Simple merge
diff --cc net/802/stp.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/ipv4/ipmr.c
Simple merge
diff --cc net/ipv4/proc.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/ipv6/raw.c
Simple merge
Simple merge
diff --cc net/irda/qos.c
Simple merge
Simple merge
Simple merge
Simple merge
index a42869c9d341869fdfbd05e53a410cc86ea0c2b4,b6522bbcd644e6b8ae4dcf96a63d5b1064590cc5..06b2dffb6fa7d782b947d60ceb3e585b09dcdb08
  #include <linux/skbuff.h>
  #include <linux/if_arp.h>
  #include <linux/etherdevice.h>
+ #include <linux/moduleparam.h>
  #include <linux/rtnetlink.h>
 -#include <linux/pm_qos_params.h>
 +#include <linux/pm_qos.h>
  #include <linux/crc32.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <net/mac80211.h>
  #include <asm/unaligned.h>
  
Simple merge
Simple merge
index d262a2519ee91567c040362fcb0fcc56e974eb05,8bbaa8d88f3f14c509ef0866f8431b741e739601..290f4a047756c7978e4afa9bed315c336d2eec2c
  
  #include <linux/if_arp.h>
  #include <linux/rtnetlink.h>
 -#include <linux/pm_qos_params.h>
 +#include <linux/pm_qos.h>
  #include <net/sch_generic.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <net/mac80211.h>
  
  #include "ieee80211_i.h"
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 4047e29acb3b0f1eeac3d030c8ae7642399d80f1,0000000000000000000000000000000000000000..3bfabcb2e07c75a53153fb6ab4277b94ce54e459
mode 100644,000000..100644
--- /dev/null
@@@ -1,797 -1,0 +1,798 @@@
 +/*
 + *  The NFC Controller Interface is the communication protocol between an
 + *  NFC Controller (NFCC) and a Device Host (DH).
 + *
 + *  Copyright (C) 2011 Texas Instruments, Inc.
 + *
 + *  Written by Ilan Elias <ilane@ti.com>
 + *
 + *  Acknowledgements:
 + *  This file is based on hci_core.c, which was written
 + *  by Maxim Krasnyansky.
 + *
 + *  This program is free software; you can redistribute it and/or modify
 + *  it under the terms of the GNU General Public License version 2
 + *  as published by the Free Software Foundation
 + *
 + *  This program is distributed in the hope that it will be useful,
 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + *  GNU General Public License for more details.
 + *
 + *  You should have received a copy of the GNU General Public License
 + *  along with this program; if not, write to the Free Software
 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 + *
 + */
 +
 +#include <linux/types.h>
 +#include <linux/workqueue.h>
 +#include <linux/completion.h>
 +#include <linux/sched.h>
 +#include <linux/bitops.h>
 +#include <linux/skbuff.h>
 +
 +#include "../nfc.h"
 +#include <net/nfc/nci.h>
 +#include <net/nfc/nci_core.h>
 +#include <linux/nfc.h>
++#include <linux/export.h>
 +
 +static void nci_cmd_work(struct work_struct *work);
 +static void nci_rx_work(struct work_struct *work);
 +static void nci_tx_work(struct work_struct *work);
 +
 +/* ---- NCI requests ---- */
 +
 +void nci_req_complete(struct nci_dev *ndev, int result)
 +{
 +      if (ndev->req_status == NCI_REQ_PEND) {
 +              ndev->req_result = result;
 +              ndev->req_status = NCI_REQ_DONE;
 +              complete(&ndev->req_completion);
 +      }
 +}
 +
 +static void nci_req_cancel(struct nci_dev *ndev, int err)
 +{
 +      if (ndev->req_status == NCI_REQ_PEND) {
 +              ndev->req_result = err;
 +              ndev->req_status = NCI_REQ_CANCELED;
 +              complete(&ndev->req_completion);
 +      }
 +}
 +
 +/* Execute request and wait for completion. */
 +static int __nci_request(struct nci_dev *ndev,
 +      void (*req)(struct nci_dev *ndev, unsigned long opt),
 +      unsigned long opt,
 +      __u32 timeout)
 +{
 +      int rc = 0;
 +      unsigned long completion_rc;
 +
 +      ndev->req_status = NCI_REQ_PEND;
 +
 +      init_completion(&ndev->req_completion);
 +      req(ndev, opt);
 +      completion_rc = wait_for_completion_interruptible_timeout(
 +                                                      &ndev->req_completion,
 +                                                      timeout);
 +
 +      nfc_dbg("wait_for_completion return %ld", completion_rc);
 +
 +      if (completion_rc > 0) {
 +              switch (ndev->req_status) {
 +              case NCI_REQ_DONE:
 +                      rc = nci_to_errno(ndev->req_result);
 +                      break;
 +
 +              case NCI_REQ_CANCELED:
 +                      rc = -ndev->req_result;
 +                      break;
 +
 +              default:
 +                      rc = -ETIMEDOUT;
 +                      break;
 +              }
 +      } else {
 +              nfc_err("wait_for_completion_interruptible_timeout failed %ld",
 +                      completion_rc);
 +
 +              rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
 +      }
 +
 +      ndev->req_status = ndev->req_result = 0;
 +
 +      return rc;
 +}
 +
 +static inline int nci_request(struct nci_dev *ndev,
 +              void (*req)(struct nci_dev *ndev, unsigned long opt),
 +              unsigned long opt, __u32 timeout)
 +{
 +      int rc;
 +
 +      if (!test_bit(NCI_UP, &ndev->flags))
 +              return -ENETDOWN;
 +
 +      /* Serialize all requests */
 +      mutex_lock(&ndev->req_lock);
 +      rc = __nci_request(ndev, req, opt, timeout);
 +      mutex_unlock(&ndev->req_lock);
 +
 +      return rc;
 +}
 +
 +static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
 +{
 +      nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
 +}
 +
 +static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
 +{
 +      nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
 +}
 +
 +static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
 +{
 +      struct nci_core_conn_create_cmd conn_cmd;
 +      struct nci_rf_disc_map_cmd cmd;
 +      struct disc_map_config *cfg = cmd.mapping_configs;
 +      __u8 *num = &cmd.num_mapping_configs;
 +      int i;
 +
 +      /* create static rf connection */
 +      conn_cmd.target_handle = 0;
 +      conn_cmd.num_target_specific_params = 0;
 +      nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
 +
 +      /* set rf mapping configurations */
 +      *num = 0;
 +
 +      /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
 +      for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
 +              if (ndev->supported_rf_interfaces[i] ==
 +                      NCI_RF_INTERFACE_ISO_DEP) {
 +                      cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
 +                      cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
 +                      cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
 +                      (*num)++;
 +              } else if (ndev->supported_rf_interfaces[i] ==
 +                      NCI_RF_INTERFACE_NFC_DEP) {
 +                      cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
 +                      cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
 +                      cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
 +                      (*num)++;
 +              }
 +
 +              if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
 +                      break;
 +      }
 +
 +      nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
 +              (1 + ((*num)*sizeof(struct disc_map_config))),
 +              &cmd);
 +}
 +
 +static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
 +{
 +      struct nci_rf_disc_cmd cmd;
 +      __u32 protocols = opt;
 +
 +      cmd.num_disc_configs = 0;
 +
 +      if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
 +              (protocols & NFC_PROTO_JEWEL_MASK
 +              || protocols & NFC_PROTO_MIFARE_MASK
 +              || protocols & NFC_PROTO_ISO14443_MASK
 +              || protocols & NFC_PROTO_NFC_DEP_MASK)) {
 +              cmd.disc_configs[cmd.num_disc_configs].type =
 +              NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
 +              cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 +              cmd.num_disc_configs++;
 +      }
 +
 +      if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
 +              (protocols & NFC_PROTO_ISO14443_MASK)) {
 +              cmd.disc_configs[cmd.num_disc_configs].type =
 +              NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
 +              cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 +              cmd.num_disc_configs++;
 +      }
 +
 +      if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
 +              (protocols & NFC_PROTO_FELICA_MASK
 +              || protocols & NFC_PROTO_NFC_DEP_MASK)) {
 +              cmd.disc_configs[cmd.num_disc_configs].type =
 +              NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
 +              cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 +              cmd.num_disc_configs++;
 +      }
 +
 +      nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
 +              (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
 +              &cmd);
 +}
 +
 +static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
 +{
 +      struct nci_rf_deactivate_cmd cmd;
 +
 +      cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
 +
 +      nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
 +                      sizeof(struct nci_rf_deactivate_cmd),
 +                      &cmd);
 +}
 +
 +static int nci_open_device(struct nci_dev *ndev)
 +{
 +      int rc = 0;
 +
 +      mutex_lock(&ndev->req_lock);
 +
 +      if (test_bit(NCI_UP, &ndev->flags)) {
 +              rc = -EALREADY;
 +              goto done;
 +      }
 +
 +      if (ndev->ops->open(ndev)) {
 +              rc = -EIO;
 +              goto done;
 +      }
 +
 +      atomic_set(&ndev->cmd_cnt, 1);
 +
 +      set_bit(NCI_INIT, &ndev->flags);
 +
 +      rc = __nci_request(ndev, nci_reset_req, 0,
 +                              msecs_to_jiffies(NCI_RESET_TIMEOUT));
 +
 +      if (!rc) {
 +              rc = __nci_request(ndev, nci_init_req, 0,
 +                              msecs_to_jiffies(NCI_INIT_TIMEOUT));
 +      }
 +
 +      if (!rc) {
 +              rc = __nci_request(ndev, nci_init_complete_req, 0,
 +                              msecs_to_jiffies(NCI_INIT_TIMEOUT));
 +      }
 +
 +      clear_bit(NCI_INIT, &ndev->flags);
 +
 +      if (!rc) {
 +              set_bit(NCI_UP, &ndev->flags);
 +      } else {
 +              /* Init failed, cleanup */
 +              skb_queue_purge(&ndev->cmd_q);
 +              skb_queue_purge(&ndev->rx_q);
 +              skb_queue_purge(&ndev->tx_q);
 +
 +              ndev->ops->close(ndev);
 +              ndev->flags = 0;
 +      }
 +
 +done:
 +      mutex_unlock(&ndev->req_lock);
 +      return rc;
 +}
 +
 +static int nci_close_device(struct nci_dev *ndev)
 +{
 +      nci_req_cancel(ndev, ENODEV);
 +      mutex_lock(&ndev->req_lock);
 +
 +      if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
 +              del_timer_sync(&ndev->cmd_timer);
 +              mutex_unlock(&ndev->req_lock);
 +              return 0;
 +      }
 +
 +      /* Drop RX and TX queues */
 +      skb_queue_purge(&ndev->rx_q);
 +      skb_queue_purge(&ndev->tx_q);
 +
 +      /* Flush RX and TX wq */
 +      flush_workqueue(ndev->rx_wq);
 +      flush_workqueue(ndev->tx_wq);
 +
 +      /* Reset device */
 +      skb_queue_purge(&ndev->cmd_q);
 +      atomic_set(&ndev->cmd_cnt, 1);
 +
 +      set_bit(NCI_INIT, &ndev->flags);
 +      __nci_request(ndev, nci_reset_req, 0,
 +                              msecs_to_jiffies(NCI_RESET_TIMEOUT));
 +      clear_bit(NCI_INIT, &ndev->flags);
 +
 +      /* Flush cmd wq */
 +      flush_workqueue(ndev->cmd_wq);
 +
 +      /* After this point our queues are empty
 +       * and no works are scheduled. */
 +      ndev->ops->close(ndev);
 +
 +      /* Clear flags */
 +      ndev->flags = 0;
 +
 +      mutex_unlock(&ndev->req_lock);
 +
 +      return 0;
 +}
 +
 +/* NCI command timer function */
 +static void nci_cmd_timer(unsigned long arg)
 +{
 +      struct nci_dev *ndev = (void *) arg;
 +
 +      nfc_dbg("entry");
 +
 +      atomic_set(&ndev->cmd_cnt, 1);
 +      queue_work(ndev->cmd_wq, &ndev->cmd_work);
 +}
 +
 +static int nci_dev_up(struct nfc_dev *nfc_dev)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +
 +      nfc_dbg("entry");
 +
 +      return nci_open_device(ndev);
 +}
 +
 +static int nci_dev_down(struct nfc_dev *nfc_dev)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +
 +      nfc_dbg("entry");
 +
 +      return nci_close_device(ndev);
 +}
 +
 +static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +      int rc;
 +
 +      nfc_dbg("entry");
 +
 +      if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
 +              nfc_err("unable to start poll, since poll is already active");
 +              return -EBUSY;
 +      }
 +
 +      if (ndev->target_active_prot) {
 +              nfc_err("there is an active target");
 +              return -EBUSY;
 +      }
 +
 +      if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
 +              nfc_dbg("target is active, implicitly deactivate...");
 +
 +              rc = nci_request(ndev, nci_rf_deactivate_req, 0,
 +                      msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 +              if (rc)
 +                      return -EBUSY;
 +      }
 +
 +      rc = nci_request(ndev, nci_rf_discover_req, protocols,
 +              msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
 +
 +      if (!rc)
 +              ndev->poll_prots = protocols;
 +
 +      return rc;
 +}
 +
 +static void nci_stop_poll(struct nfc_dev *nfc_dev)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +
 +      nfc_dbg("entry");
 +
 +      if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
 +              nfc_err("unable to stop poll, since poll is not active");
 +              return;
 +      }
 +
 +      nci_request(ndev, nci_rf_deactivate_req, 0,
 +              msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 +}
 +
 +static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
 +                              __u32 protocol)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +
 +      nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
 +
 +      if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
 +              nfc_err("there is no available target to activate");
 +              return -EINVAL;
 +      }
 +
 +      if (ndev->target_active_prot) {
 +              nfc_err("there is already an active target");
 +              return -EBUSY;
 +      }
 +
 +      if (!(ndev->target_available_prots & (1 << protocol))) {
 +              nfc_err("target does not support the requested protocol 0x%x",
 +                      protocol);
 +              return -EINVAL;
 +      }
 +
 +      ndev->target_active_prot = protocol;
 +      ndev->target_available_prots = 0;
 +
 +      return 0;
 +}
 +
 +static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +
 +      nfc_dbg("entry, target_idx %d", target_idx);
 +
 +      if (!ndev->target_active_prot) {
 +              nfc_err("unable to deactivate target, no active target");
 +              return;
 +      }
 +
 +      ndev->target_active_prot = 0;
 +
 +      if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
 +              nci_request(ndev, nci_rf_deactivate_req, 0,
 +                      msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 +      }
 +}
 +
 +static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
 +                                              struct sk_buff *skb,
 +                                              data_exchange_cb_t cb,
 +                                              void *cb_context)
 +{
 +      struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 +      int rc;
 +
 +      nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
 +
 +      if (!ndev->target_active_prot) {
 +              nfc_err("unable to exchange data, no active target");
 +              return -EINVAL;
 +      }
 +
 +      if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
 +              return -EBUSY;
 +
 +      /* store cb and context to be used on receiving data */
 +      ndev->data_exchange_cb = cb;
 +      ndev->data_exchange_cb_context = cb_context;
 +
 +      rc = nci_send_data(ndev, ndev->conn_id, skb);
 +      if (rc)
 +              clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
 +
 +      return rc;
 +}
 +
 +static struct nfc_ops nci_nfc_ops = {
 +      .dev_up = nci_dev_up,
 +      .dev_down = nci_dev_down,
 +      .start_poll = nci_start_poll,
 +      .stop_poll = nci_stop_poll,
 +      .activate_target = nci_activate_target,
 +      .deactivate_target = nci_deactivate_target,
 +      .data_exchange = nci_data_exchange,
 +};
 +
 +/* ---- Interface to NCI drivers ---- */
 +
 +/**
 + * nci_allocate_device - allocate a new nci device
 + *
 + * @ops: device operations
 + * @supported_protocols: NFC protocols supported by the device
 + */
 +struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 +                                      __u32 supported_protocols,
 +                                      int tx_headroom,
 +                                      int tx_tailroom)
 +{
 +      struct nci_dev *ndev;
 +
 +      nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
 +
 +      if (!ops->open || !ops->close || !ops->send)
 +              return NULL;
 +
 +      if (!supported_protocols)
 +              return NULL;
 +
 +      ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
 +      if (!ndev)
 +              return NULL;
 +
 +      ndev->ops = ops;
 +      ndev->tx_headroom = tx_headroom;
 +      ndev->tx_tailroom = tx_tailroom;
 +
 +      ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
 +                                              supported_protocols,
 +                                              tx_headroom + NCI_DATA_HDR_SIZE,
 +                                              tx_tailroom);
 +      if (!ndev->nfc_dev)
 +              goto free_exit;
 +
 +      nfc_set_drvdata(ndev->nfc_dev, ndev);
 +
 +      return ndev;
 +
 +free_exit:
 +      kfree(ndev);
 +      return NULL;
 +}
 +EXPORT_SYMBOL(nci_allocate_device);
 +
 +/**
 + * nci_free_device - deallocate nci device
 + *
 + * @ndev: The nci device to deallocate
 + */
 +void nci_free_device(struct nci_dev *ndev)
 +{
 +      nfc_dbg("entry");
 +
 +      nfc_free_device(ndev->nfc_dev);
 +      kfree(ndev);
 +}
 +EXPORT_SYMBOL(nci_free_device);
 +
 +/**
 + * nci_register_device - register a nci device in the nfc subsystem
 + *
 + * @dev: The nci device to register
 + */
 +int nci_register_device(struct nci_dev *ndev)
 +{
 +      int rc;
 +      struct device *dev = &ndev->nfc_dev->dev;
 +      char name[32];
 +
 +      nfc_dbg("entry");
 +
 +      rc = nfc_register_device(ndev->nfc_dev);
 +      if (rc)
 +              goto exit;
 +
 +      ndev->flags = 0;
 +
 +      INIT_WORK(&ndev->cmd_work, nci_cmd_work);
 +      snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
 +      ndev->cmd_wq = create_singlethread_workqueue(name);
 +      if (!ndev->cmd_wq) {
 +              rc = -ENOMEM;
 +              goto unreg_exit;
 +      }
 +
 +      INIT_WORK(&ndev->rx_work, nci_rx_work);
 +      snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
 +      ndev->rx_wq = create_singlethread_workqueue(name);
 +      if (!ndev->rx_wq) {
 +              rc = -ENOMEM;
 +              goto destroy_cmd_wq_exit;
 +      }
 +
 +      INIT_WORK(&ndev->tx_work, nci_tx_work);
 +      snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
 +      ndev->tx_wq = create_singlethread_workqueue(name);
 +      if (!ndev->tx_wq) {
 +              rc = -ENOMEM;
 +              goto destroy_rx_wq_exit;
 +      }
 +
 +      skb_queue_head_init(&ndev->cmd_q);
 +      skb_queue_head_init(&ndev->rx_q);
 +      skb_queue_head_init(&ndev->tx_q);
 +
 +      setup_timer(&ndev->cmd_timer, nci_cmd_timer,
 +                      (unsigned long) ndev);
 +
 +      mutex_init(&ndev->req_lock);
 +
 +      goto exit;
 +
 +destroy_rx_wq_exit:
 +      destroy_workqueue(ndev->rx_wq);
 +
 +destroy_cmd_wq_exit:
 +      destroy_workqueue(ndev->cmd_wq);
 +
 +unreg_exit:
 +      nfc_unregister_device(ndev->nfc_dev);
 +
 +exit:
 +      return rc;
 +}
 +EXPORT_SYMBOL(nci_register_device);
 +
 +/**
 + * nci_unregister_device - unregister a nci device in the nfc subsystem
 + *
 + * @dev: The nci device to unregister
 + */
 +void nci_unregister_device(struct nci_dev *ndev)
 +{
 +      nfc_dbg("entry");
 +
 +      nci_close_device(ndev);
 +
 +      destroy_workqueue(ndev->cmd_wq);
 +      destroy_workqueue(ndev->rx_wq);
 +      destroy_workqueue(ndev->tx_wq);
 +
 +      nfc_unregister_device(ndev->nfc_dev);
 +}
 +EXPORT_SYMBOL(nci_unregister_device);
 +
 +/**
 + * nci_recv_frame - receive frame from NCI drivers
 + *
 + * @skb: The sk_buff to receive
 + */
 +int nci_recv_frame(struct sk_buff *skb)
 +{
 +      struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 +
 +      nfc_dbg("entry, len %d", skb->len);
 +
 +      if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
 +              && !test_bit(NCI_INIT, &ndev->flags))) {
 +              kfree_skb(skb);
 +              return -ENXIO;
 +      }
 +
 +      /* Queue frame for rx worker thread */
 +      skb_queue_tail(&ndev->rx_q, skb);
 +      queue_work(ndev->rx_wq, &ndev->rx_work);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL(nci_recv_frame);
 +
 +static int nci_send_frame(struct sk_buff *skb)
 +{
 +      struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 +
 +      nfc_dbg("entry, len %d", skb->len);
 +
 +      if (!ndev) {
 +              kfree_skb(skb);
 +              return -ENODEV;
 +      }
 +
 +      /* Get rid of skb owner, prior to sending to the driver. */
 +      skb_orphan(skb);
 +
 +      return ndev->ops->send(skb);
 +}
 +
 +/* Send NCI command */
 +int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
 +{
 +      struct nci_ctrl_hdr *hdr;
 +      struct sk_buff *skb;
 +
 +      nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
 +
 +      skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
 +      if (!skb) {
 +              nfc_err("no memory for command");
 +              return -ENOMEM;
 +      }
 +
 +      hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
 +      hdr->gid = nci_opcode_gid(opcode);
 +      hdr->oid = nci_opcode_oid(opcode);
 +      hdr->plen = plen;
 +
 +      nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
 +      nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
 +
 +      if (plen)
 +              memcpy(skb_put(skb, plen), payload, plen);
 +
 +      skb->dev = (void *) ndev;
 +
 +      skb_queue_tail(&ndev->cmd_q, skb);
 +      queue_work(ndev->cmd_wq, &ndev->cmd_work);
 +
 +      return 0;
 +}
 +
 +/* ---- NCI TX Data worker thread ---- */
 +
 +static void nci_tx_work(struct work_struct *work)
 +{
 +      struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
 +      struct sk_buff *skb;
 +
 +      nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
 +
 +      /* Send queued tx data */
 +      while (atomic_read(&ndev->credits_cnt)) {
 +              skb = skb_dequeue(&ndev->tx_q);
 +              if (!skb)
 +                      return;
 +
 +              atomic_dec(&ndev->credits_cnt);
 +
 +              nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
 +                              nci_pbf(skb->data),
 +                              nci_conn_id(skb->data),
 +                              nci_plen(skb->data));
 +
 +              nci_send_frame(skb);
 +      }
 +}
 +
 +/* ----- NCI RX worker thread (data & control) ----- */
 +
 +static void nci_rx_work(struct work_struct *work)
 +{
 +      struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
 +      struct sk_buff *skb;
 +
 +      while ((skb = skb_dequeue(&ndev->rx_q))) {
 +              /* Process frame */
 +              switch (nci_mt(skb->data)) {
 +              case NCI_MT_RSP_PKT:
 +                      nci_rsp_packet(ndev, skb);
 +                      break;
 +
 +              case NCI_MT_NTF_PKT:
 +                      nci_ntf_packet(ndev, skb);
 +                      break;
 +
 +              case NCI_MT_DATA_PKT:
 +                      nci_rx_data_packet(ndev, skb);
 +                      break;
 +
 +              default:
 +                      nfc_err("unknown MT 0x%x", nci_mt(skb->data));
 +                      kfree_skb(skb);
 +                      break;
 +              }
 +      }
 +}
 +
 +/* ----- NCI TX CMD worker thread ----- */
 +
 +static void nci_cmd_work(struct work_struct *work)
 +{
 +      struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
 +      struct sk_buff *skb;
 +
 +      nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
 +
 +      /* Send queued command */
 +      if (atomic_read(&ndev->cmd_cnt)) {
 +              skb = skb_dequeue(&ndev->cmd_q);
 +              if (!skb)
 +                      return;
 +
 +              atomic_dec(&ndev->cmd_cnt);
 +
 +              nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
 +                              nci_pbf(skb->data),
 +                              nci_opcode_gid(nci_opcode(skb->data)),
 +                              nci_opcode_oid(nci_opcode(skb->data)),
 +                              nci_plen(skb->data));
 +
 +              nci_send_frame(skb);
 +
 +              mod_timer(&ndev->cmd_timer,
 +                      jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
 +      }
 +}
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 62f121d1d9cbdb833d27990fc36b58f2e3ab22aa,4113a66fe38f0d3fe6f511e411a1ca0a4587ddd4..2aaca826e36059d3b28b58bb01b70218a3cb718a
  #include <linux/if_arp.h>
  #include <linux/etherdevice.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <net/iw_handler.h>
  #include <net/cfg80211.h>
 +#include <net/cfg80211-wext.h>
  #include "wext-compat.h"
  #include "core.h"
  
index 0d4b8c3033ff53bba23ef5dd5620922d3f0a2faf,8d4f37d383f40cee2fb4c72c5941700923198db3..9e9bdd91a807cdaa6b275bdef7b6e1c03b1e1479
@@@ -8,8 -8,8 +8,9 @@@
  #include <linux/etherdevice.h>
  #include <linux/if_arp.h>
  #include <linux/slab.h>
+ #include <linux/export.h>
  #include <net/cfg80211.h>
 +#include <net/cfg80211-wext.h>
  #include "wext-compat.h"
  #include "nl80211.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index eb2747af1a0a881acbf1f47d647f75fadaa667de,9770f0c6f170b721cd1610d00896ce1cab7887c8..7c343878c82d69ce0c6cd7aaa0f983f28397ec38
  #include <linux/file.h>
  #include <linux/slab.h>
  #include <linux/time.h>
 -#include <linux/pm_qos_params.h>
 +#include <linux/pm_qos.h>
  #include <linux/uio.h>
  #include <linux/dma-mapping.h>
+ #include <linux/module.h>
  #include <sound/core.h>
  #include <sound/control.h>
  #include <sound/info.h>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 66fcccd79efe9a2cbc8aa591d491f0f15fbc378f,963efe066db886d8d49b8ccb04bea5b080af6806..d6535b849409ae3054eb303f728ad8d615537f1b
@@@ -13,7 -13,7 +13,8 @@@
  
  #include <linux/i2c.h>
  #include <linux/spi/spi.h>
 +#include <linux/regmap.h>
+ #include <linux/export.h>
  #include <sound/soc.h>
  
  #include <trace/events/asoc.h>
Simple merge
index 07bcfe4d18a7a9c319fdd310f92afe5e6f160414,3ebbdec2a46487f0917320bc4bb7d882ea60cbe0..3b5f517a3972a0ff375d3c98a0ac750dd8e40fb8
@@@ -15,8 -15,8 +15,9 @@@
   */
  
  #include <linux/firmware.h>
+ #include <linux/module.h>
  #include <linux/bitrev.h>
 +#include <linux/kernel.h>
  
  #include "firmware.h"
  #include "chip.h"
Simple merge
Simple merge
Simple merge