git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy...
author	Jens Axboe <axboe@kernel.dk>
Tue, 31 Jul 2012 09:47:36 +0000 (11:47 +0200)
committer	Jens Axboe <axboe@kernel.dk>
Tue, 31 Jul 2012 09:47:36 +0000 (11:47 +0200)
2323 files changed:
.mailmap
Documentation/ABI/testing/sysfs-block-rssd
Documentation/ABI/testing/sysfs-bus-fcoe [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-iio
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/ABI/testing/sysfs-class-mtd
Documentation/CodingStyle
Documentation/DocBook/media/v4l/controls.xml
Documentation/DocBook/media/v4l/pixfmt.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
Documentation/DocBook/media/v4l/vidioc-dqevent.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/DocBook/mtdnand.tmpl
Documentation/arm/OMAP/DSS
Documentation/arm/SPEAr/overview.txt
Documentation/block/queue-sysfs.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/fsl-mma8450.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/mtd/gpmi-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/mxc-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/Locking
Documentation/filesystems/proc.txt
Documentation/filesystems/vfs.txt
Documentation/hwmon/coretemp
Documentation/i2c/functionality
Documentation/i2c/i2c-protocol
Documentation/kdump/kdump.txt
Documentation/kernel-parameters.txt
Documentation/networking/stmmac.txt
Documentation/power/charger-manager.txt
Documentation/power/power_supply_class.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
Documentation/stable_kernel_rules.txt
Documentation/sysctl/fs.txt
Documentation/virtual/kvm/api.txt
Documentation/vm/frontswap.txt [new file with mode: 0644]
Documentation/vm/pagemap.txt
Documentation/vm/slub.txt
Documentation/watchdog/watchdog-kernel-api.txt
Documentation/watchdog/watchdog-parameters.txt
Documentation/x86/efi-stub.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/include/asm/posix_types.h
arch/alpha/kernel/signal.c
arch/arm/Kconfig
arch/arm/boot/dts/db8500.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/lpc32xx.dtsi
arch/arm/boot/dts/mmp2-brownstone.dts
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/phy3250.dts
arch/arm/boot/dts/snowball.dts
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1310.dtsi
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/spear300-evb.dts
arch/arm/boot/dts/spear300.dtsi
arch/arm/boot/dts/spear310-evb.dts
arch/arm/boot/dts/spear310.dtsi
arch/arm/boot/dts/spear320-evb.dts
arch/arm/boot/dts/spear320.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/spear600.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
arch/arm/boot/dts/vexpress-v2p-ca9.dts
arch/arm/common/dmabounce.c
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/atomic.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/hardware/sp810.h
arch/arm/include/asm/posix_types.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/kernel/smp.c
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-dove/include/mach/bridge-regs.h
arch/arm/mach-dove/include/mach/dove.h
arch/arm/mach-ep93xx/snappercl15.c
arch/arm/mach-ep93xx/ts72xx.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/Makefile
arch/arm/mach-exynos/clock-exynos5.c
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-exynos/include/mach/pm-core.h
arch/arm/mach-exynos/include/mach/pmu.h
arch/arm/mach-exynos/include/mach/regs-clock.h
arch/arm/mach-exynos/include/mach/regs-pmu.h
arch/arm/mach-exynos/mach-nuri.c
arch/arm/mach-exynos/mach-origen.c
arch/arm/mach-exynos/mach-smdkv310.c
arch/arm/mach-exynos/mach-universal_c210.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-exynos/pmu.c
arch/arm/mach-highbank/Makefile
arch/arm/mach-highbank/core.h
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/smc.S [new file with mode: 0644]
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx1.c
arch/arm/mach-imx/clk-imx21.c
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx31.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-pllv2.c
arch/arm/mach-imx/crm-regs-imx5.h
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/imx27-dt.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-cpuimx51sd.c
arch/arm/mach-imx/mach-imx27_visstrim_m10.c
arch/arm/mach-imx/mach-mx21ads.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/mm-imx5.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-ixp4xx/include/mach/gpio.h
arch/arm/mach-ixp4xx/ixdp425-setup.c
arch/arm/mach-kirkwood/board-iconnect.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/include/mach/bridge-regs.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-mmp/include/mach/gpio-pxa.h [deleted file]
arch/arm/mach-mmp/irq.c
arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
arch/arm/mach-mxs/mach-apx4devkit.c
arch/arm/mach-nomadik/board-nhk8815.c
arch/arm/mach-omap1/board-fsample.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-perseus2.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.h
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/cm.h
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_l3_smx.c
arch/arm/mach-omap2/omap_phy_internal.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/prm2xxx_3xxx.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mach-omap2/usb-tusb6010.c
arch/arm/mach-orion5x/include/mach/bridge-regs.h
arch/arm/mach-orion5x/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-orion5x/include/mach/orion5x.h
arch/arm/mach-orion5x/ts78xx-setup.c
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/em-x270.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/palmtx.c
arch/arm/mach-s3c24xx/clock-s3c2440.c
arch/arm/mach-s3c24xx/include/mach/irqs.h
arch/arm/mach-s3c24xx/irq-s3c2416.c
arch/arm/mach-s3c24xx/mach-smdk2416.c
arch/arm/mach-s3c24xx/s3c2416.c
arch/arm/mach-s3c64xx/cpuidle.c
arch/arm/mach-s3c64xx/mach-anw6410.c
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/mach-crag6410.c
arch/arm/mach-s3c64xx/mach-hmt.c
arch/arm/mach-s3c64xx/mach-mini6410.c
arch/arm/mach-s3c64xx/mach-real6410.c
arch/arm/mach-s3c64xx/mach-smartq5.c
arch/arm/mach-s3c64xx/mach-smartq7.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/mach-s5p64x0/mach-smdk6440.c
arch/arm/mach-s5p64x0/mach-smdk6450.c
arch/arm/mach-s5pc100/mach-smdkc100.c
arch/arm/mach-s5pv210/mach-aquila.c
arch/arm/mach-s5pv210/mach-goni.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-kzm9d.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/intc-r8a7779.c
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-spear13xx/include/mach/debug-macro.S
arch/arm/mach-spear13xx/include/mach/dma.h
arch/arm/mach-spear13xx/include/mach/generic.h
arch/arm/mach-spear13xx/include/mach/gpio.h
arch/arm/mach-spear13xx/include/mach/irqs.h
arch/arm/mach-spear13xx/include/mach/spear.h
arch/arm/mach-spear13xx/include/mach/timex.h
arch/arm/mach-spear13xx/include/mach/uncompress.h
arch/arm/mach-spear13xx/spear1310.c
arch/arm/mach-spear13xx/spear1340.c
arch/arm/mach-spear13xx/spear13xx.c
arch/arm/mach-spear3xx/include/mach/debug-macro.S
arch/arm/mach-spear3xx/include/mach/generic.h
arch/arm/mach-spear3xx/include/mach/gpio.h
arch/arm/mach-spear3xx/include/mach/irqs.h
arch/arm/mach-spear3xx/include/mach/misc_regs.h
arch/arm/mach-spear3xx/include/mach/spear.h
arch/arm/mach-spear3xx/include/mach/timex.h
arch/arm/mach-spear3xx/include/mach/uncompress.h
arch/arm/mach-spear3xx/spear300.c
arch/arm/mach-spear3xx/spear310.c
arch/arm/mach-spear3xx/spear320.c
arch/arm/mach-spear3xx/spear3xx.c
arch/arm/mach-spear6xx/include/mach/gpio.h
arch/arm/mach-spear6xx/include/mach/misc_regs.h
arch/arm/mach-spear6xx/spear6xx.c
arch/arm/mach-tegra/reset.c
arch/arm/mach-ux500/board-mop500-uib.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/board-mop500.h
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/timer.c
arch/arm/mach-versatile/core.c
arch/arm/mach-versatile/include/mach/hardware.h
arch/arm/mach-versatile/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-versatile/pci.c
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/arm/plat-mxc/epit.c
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/mx2_cam.h
arch/arm/plat-mxc/time.c
arch/arm/plat-omap/clock.c
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/gpmc.h
arch/arm/plat-omap/include/plat/mmc.h
arch/arm/plat-orion/common.c
arch/arm/plat-pxa/ssp.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/fb.h
arch/arm/plat-samsung/include/plat/map-s3c.h
arch/arm/plat-samsung/include/plat/s3c2416.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/arm/plat-samsung/s5p-clock.c
arch/arm/plat-spear/include/plat/debug-macro.S
arch/arm/plat-spear/include/plat/pl080.h
arch/arm/plat-spear/include/plat/shirq.h
arch/arm/plat-spear/include/plat/timex.h
arch/arm/plat-spear/include/plat/uncompress.h
arch/arm/plat-spear/pl080.c
arch/arm/plat-spear/restart.c
arch/arm/plat-spear/shirq.c
arch/avr32/include/asm/posix_types.h
arch/avr32/kernel/entry-avr32b.S
arch/avr32/kernel/signal.c
arch/blackfin/include/asm/posix_types.h
arch/blackfin/include/asm/thread_info.h
arch/blackfin/kernel/process.c
arch/blackfin/kernel/signal.c
arch/blackfin/kernel/trace.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-common/entry.S
arch/c6x/kernel/signal.c
arch/cris/arch-v10/kernel/signal.c
arch/cris/arch-v32/kernel/signal.c
arch/cris/include/asm/posix_types.h
arch/cris/kernel/ptrace.c
arch/frv/include/asm/posix_types.h
arch/frv/include/asm/thread_info.h
arch/frv/kernel/entry.S
arch/frv/kernel/signal.c
arch/h8300/include/asm/pgtable.h
arch/h8300/include/asm/posix_types.h
arch/h8300/include/asm/uaccess.h
arch/h8300/kernel/setup.c
arch/h8300/kernel/signal.c
arch/h8300/kernel/time.c
arch/h8300/mm/init.c
arch/hexagon/kernel/signal.c
arch/ia64/include/asm/posix_types.h
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/process.c
arch/ia64/kernel/signal.c
arch/ia64/kernel/sys_ia64.c
arch/m32r/boot/compressed/Makefile
arch/m32r/boot/compressed/misc.c
arch/m32r/include/asm/posix_types.h
arch/m32r/include/asm/ptrace.h
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/Kconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/m528xsim.h
arch/m68k/include/asm/posix_types.h
arch/m68k/include/asm/uaccess_mm.h
arch/m68k/kernel/ptrace.c
arch/m68k/kernel/signal.c
arch/m68k/kernel/time.c
arch/m68k/lib/uaccess.c
arch/m68k/platform/68328/timers.c
arch/m68k/platform/68360/config.c
arch/m68k/platform/coldfire/clk.c
arch/microblaze/include/asm/thread_info.h
arch/microblaze/kernel/signal.c
arch/mips/Kconfig
arch/mips/alchemy/devboards/db1200.c
arch/mips/alchemy/devboards/db1300.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/bcm47xx/Kconfig
arch/mips/bcm63xx/dev-pcmcia.c
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/bitops.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/mipsmtregs.h
arch/mips/include/asm/posix_types.h
arch/mips/include/asm/stat.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/signal-common.h
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/signal_n32.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/page-funcs.S [new file with mode: 0644]
arch/mips/mm/page.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-pci.c
arch/mips/mti-malta/malta-setup.c
arch/mips/netlogic/xlp/setup.c
arch/mips/oprofile/common.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/fixup-fuloong2e.c
arch/mips/pci/fixup-lemote2f.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/fixup-mpc30x.c
arch/mips/pci/fixup-sb1250.c
arch/mips/pci/ops-tx4927.c
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci-xlr.c
arch/mips/pmc-sierra/yosemite/smp.c
arch/mips/pnx833x/common/platform.c
arch/mips/powertv/asic/asic-calliope.c
arch/mips/powertv/asic/asic-cronus.c
arch/mips/powertv/asic/asic-gaia.c
arch/mips/powertv/asic/asic-zeus.c
arch/mips/rb532/devices.c
arch/mips/txx9/generic/pci.c
arch/mn10300/include/asm/posix_types.h
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timex.h
arch/mn10300/kernel/cevt-mn10300.c
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/traps.c
arch/mn10300/mm/dma-alloc.c
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/smc91111.c
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/include/unit/timex.h
arch/openrisc/kernel/signal.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/bug.h
arch/parisc/include/asm/posix_types.h
arch/parisc/include/asm/smp.h
arch/parisc/include/asm/stat.h
arch/parisc/include/asm/thread_info.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/signal.c
arch/parisc/kernel/signal32.c
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/lib/lusercopy.S
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/posix_types.h
arch/powerpc/include/asm/stat.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/mm/numa.c
arch/powerpc/net/bpf_jit_64.S
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/processor_idle.c
arch/powerpc/xmon/xmon.c
arch/s390/Kconfig
arch/s390/include/asm/bitops.h
arch/s390/include/asm/cio.h
arch/s390/include/asm/cmpxchg.h
arch/s390/include/asm/cputime.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/current.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/futex.h
arch/s390/include/asm/idals.h
arch/s390/include/asm/io.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/kexec.h
arch/s390/include/asm/kmap_types.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/module.h
arch/s390/include/asm/os_info.h
arch/s390/include/asm/percpu.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/posix_types.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/rwsem.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/sfp-util.h
arch/s390/include/asm/string.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/timer.h
arch/s390/include/asm/tlb.h
arch/s390/include/asm/tlbflush.h
arch/s390/include/asm/types.h
arch/s390/include/asm/uaccess.h
arch/s390/include/asm/vdso.h
arch/s390/kernel/base.S
arch/s390/kernel/compat_signal.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.h
arch/s390/kernel/head_kdump.S
arch/s390/kernel/ipl.c
arch/s390/kernel/irq.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/os_info.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/sysinfo.c
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_std.c
arch/s390/mm/maccess.c
arch/s390/mm/vmem.c
arch/s390/oprofile/hwsampler.c
arch/score/kernel/signal.c
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/boards/mach-kfr2r09/setup.c
arch/sh/boards/mach-migor/setup.c
arch/sh/drivers/pci/pcie-sh7786.c
arch/sh/include/asm/Kbuild
arch/sh/include/asm/bitsperlong.h [deleted file]
arch/sh/include/asm/cputime.h [deleted file]
arch/sh/include/asm/current.h [deleted file]
arch/sh/include/asm/delay.h [deleted file]
arch/sh/include/asm/div64.h [deleted file]
arch/sh/include/asm/emergency-restart.h [deleted file]
arch/sh/include/asm/errno.h [deleted file]
arch/sh/include/asm/fcntl.h [deleted file]
arch/sh/include/asm/io_noioport.h
arch/sh/include/asm/ioctl.h [deleted file]
arch/sh/include/asm/ipcbuf.h [deleted file]
arch/sh/include/asm/irq_regs.h [deleted file]
arch/sh/include/asm/kvm_para.h [deleted file]
arch/sh/include/asm/local.h [deleted file]
arch/sh/include/asm/local64.h [deleted file]
arch/sh/include/asm/mman.h [deleted file]
arch/sh/include/asm/msgbuf.h [deleted file]
arch/sh/include/asm/param.h [deleted file]
arch/sh/include/asm/parport.h [deleted file]
arch/sh/include/asm/percpu.h [deleted file]
arch/sh/include/asm/poll.h [deleted file]
arch/sh/include/asm/posix_types_32.h
arch/sh/include/asm/posix_types_64.h
arch/sh/include/asm/resource.h [deleted file]
arch/sh/include/asm/scatterlist.h [deleted file]
arch/sh/include/asm/sembuf.h [deleted file]
arch/sh/include/asm/serial.h [deleted file]
arch/sh/include/asm/shmbuf.h [deleted file]
arch/sh/include/asm/siginfo.h [deleted file]
arch/sh/include/asm/sizes.h [deleted file]
arch/sh/include/asm/socket.h [deleted file]
arch/sh/include/asm/statfs.h [deleted file]
arch/sh/include/asm/termbits.h [deleted file]
arch/sh/include/asm/termios.h [deleted file]
arch/sh/include/asm/thread_info.h
arch/sh/include/asm/uaccess.h
arch/sh/include/asm/uaccess_32.h
arch/sh/include/asm/uaccess_64.h
arch/sh/include/asm/ucontext.h [deleted file]
arch/sh/include/asm/word-at-a-time.h [new file with mode: 0644]
arch/sh/include/asm/xor.h [deleted file]
arch/sh/include/cpu-sh2a/cpu/ubc.h [deleted file]
arch/sh/kernel/cpu/sh3/serial-sh7720.c
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
arch/sh/kernel/cpu/sh4a/clock-sh7722.c
arch/sh/kernel/cpu/sh4a/clock-sh7723.c
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
arch/sh/kernel/cpu/sh4a/clock-sh7734.c
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
arch/sh/kernel/cpu/sh4a/clock-shx3.c
arch/sh/kernel/cpu/sh5/entry.S
arch/sh/kernel/process.c
arch/sh/kernel/process_64.c
arch/sh/kernel/sh_ksyms_64.c
arch/sh/kernel/signal_32.c
arch/sh/kernel/signal_64.c
arch/sh/kernel/smp.c
arch/sparc/Kconfig
arch/sparc/include/asm/asi.h
arch/sparc/include/asm/asmmacro.h
arch/sparc/include/asm/cmt.h [deleted file]
arch/sparc/include/asm/dma-mapping.h
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_amba.h
arch/sparc/include/asm/mpmbox.h [deleted file]
arch/sparc/include/asm/pgtsrmmu.h
arch/sparc/include/asm/posix_types.h
arch/sparc/include/asm/psr.h
arch/sparc/include/asm/sections.h
arch/sparc/include/asm/thread_info_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/cpu.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/etrap_32.S
arch/sparc/kernel/head_32.S
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/prom_common.c
arch/sparc/kernel/rtrap_32.S
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/trampoline_32.S
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/vio.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/wof.S
arch/sparc/kernel/wuf.S
arch/sparc/math-emu/math_64.c
arch/sparc/mm/Makefile
arch/sparc/mm/leon_mm.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/srmmu_access.S [new file with mode: 0644]
arch/tile/include/asm/compat.h
arch/tile/include/asm/thread_info.h
arch/tile/include/asm/uaccess.h
arch/tile/kernel/backtrace.c
arch/tile/kernel/compat_signal.c
arch/tile/kernel/entry.S
arch/tile/kernel/process.c
arch/tile/kernel/setup.c
arch/tile/kernel/signal.c
arch/um/drivers/mconsole_kern.c
arch/um/include/shared/frame_kern.h
arch/um/kernel/process.c
arch/um/kernel/reboot.c
arch/um/kernel/signal.c
arch/um/kernel/trap.c
arch/unicore32/kernel/signal.c
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/posix_types_32.h
arch/x86/include/asm/sighandling.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mkcapflags.pl
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/nmi.c
arch/x86/kernel/nmi_selftest.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/mmu.c
arch/x86/lib/csum-wrappers_64.c
arch/x86/lib/usercopy.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/init.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/srat.c
arch/x86/platform/mrst/early_printk_mrst.c
arch/x86/platform/mrst/mrst.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/syscalls/syscall_64.tbl
arch/x86/tools/gen-insn-attr-x86.awk
arch/x86/um/signal.c
arch/x86/um/sys_call_table_32.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/Makefile
arch/xtensa/include/asm/syscall.h
arch/xtensa/kernel/process.c
arch/xtensa/kernel/signal.c
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-ioc.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-timeout.c
block/blk.h
block/bsg-lib.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/acpi/Kconfig
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/ghes.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/video.c
drivers/ata/pata_arasan_cf.c
drivers/atm/solos-pci.c
drivers/base/dd.c
drivers/base/power/main.c
drivers/base/regmap/regmap-i2c.c
drivers/base/regmap/regmap.c
drivers/base/soc.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_pci.c
drivers/bcma/sprom.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_proc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/rbd.c
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/hw_random/atmel-rng.c
drivers/clk/clk.c
drivers/clk/mxs/clk-imx23.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/spear/clk-aux-synth.c
drivers/clk/spear/clk-frac-synth.c
drivers/clk/spear/clk-gpt-synth.c
drivers/clk/spear/clk-vco-pll.c
drivers/clk/spear/clk.c
drivers/clk/spear/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/spear/spear6xx_clock.c
drivers/clocksource/Makefile
drivers/clocksource/em_sti.c [new file with mode: 0644]
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/dma/dw_dmac.c
drivers/dma/imx-sdma.c
drivers/dma/pl330.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/edac/mce_amd.h
drivers/edac/mpc85xx_edac.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon_class.c
drivers/extcon/extcon_gpio.c
drivers/gpio/Kconfig
drivers/gpio/devres.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-samsung.c
drivers/gpio/gpio-sta2x11.c
drivers/gpio/gpio-tps65910.c
drivers/gpio/gpio-wm8994.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/opregion.c
drivers/gpu/drm/gma500/opregion.h
drivers/gpu/drm/gma500/psb_device.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_reg.h
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/via/via_map.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/Kconfig
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/Kconfig
drivers/hwmon/applesmc.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc2103.c
drivers/hwmon/it87.c
drivers/hwmon/jc42.c
drivers/hwmon/lineage-pem.c
drivers/hwmon/ltc4261.c
drivers/hwmon/max16065.c
drivers/hwmon/sch5627.c
drivers/hwmon/sch5636.c
drivers/hwmon/sch56xx-common.c
drivers/hwmon/sch56xx-common.h
drivers/hwspinlock/hwspinlock_core.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-nuc900.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/i2c-dev.c
drivers/i2c/muxes/Kconfig
drivers/i2c/muxes/Makefile
drivers/i2c/muxes/i2c-mux-pinctrl.c [new file with mode: 0644]
drivers/ide/icside.c
drivers/ide/ide-cs.c
drivers/iio/Kconfig
drivers/iio/industrialio-core.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/input/joystick/as5011.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/mcs_touchkey.c
drivers/input/keyboard/mpr121_touchkey.c
drivers/input/keyboard/qt1070.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/misc/ad714x.c
drivers/input/misc/dm355evm_keys.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/bu21013_ts.c
drivers/input/touchscreen/cy8ctmg110_ts.c
drivers/input/touchscreen/intel-mid-touch.c
drivers/input/touchscreen/pixcir_i2c_ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/tsc2005.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/tegra-smmu.c
drivers/isdn/mISDN/stack.c
drivers/leds/Kconfig
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/leds/ledtrig-heartbeat.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/saa7146_fops.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/dvb/frontends/cx24110.c
drivers/media/dvb/frontends/cxd2820r_c.c
drivers/media/dvb/frontends/lg2160.c
drivers/media/dvb/siano/smsusb.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-sf16fmr2.c
drivers/media/radio/si470x/radio-si470x-usb.c
drivers/media/rc/winbond-cir.c
drivers/media/video/bt8xx/bttv-cards.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/bt8xx/bttv.h
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/bw-qcam.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx18/cx18-driver.h
drivers/media/video/cx18/cx18-firmware.c
drivers/media/video/cx18/cx18-mailbox.c
drivers/media/video/cx231xx/cx231xx-audio.c
drivers/media/video/cx231xx/cx231xx-vbi.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx23885/cx23885.h
drivers/media/video/cx25821/cx25821-core.c
drivers/media/video/cx25821/cx25821.h
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/cx88/cx88-blackbird.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/em28xx/em28xx-input.c
drivers/media/video/gspca/gspca.c
drivers/media/video/gspca/ov534.c
drivers/media/video/gspca/ov534_9.c
drivers/media/video/gspca/pac7311.c
drivers/media/video/gspca/sn9c20x.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-driver.h
drivers/media/video/mem2mem_testdev.c
drivers/media/video/mx2_camera.c
drivers/media/video/omap3isp/isppreview.c
drivers/media/video/pms.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-lite.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-mdevice.h
drivers/media/video/s5p-mfc/regs-mfc.h
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-mfc/s5p_mfc_opr.h
drivers/media/video/s5p-mfc/s5p_mfc_shm.h
drivers/media/video/smiapp/Kconfig
drivers/media/video/smiapp/smiapp-core.c
drivers/media/video/tuner-core.c
drivers/media/video/v4l2-dev.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/vino.c
drivers/media/video/vivi.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptctl.c
drivers/mfd/Kconfig
drivers/mfd/ab5500-core.h [deleted file]
drivers/mfd/db8500-prcmu.c
drivers/mfd/mc13xxx-spi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/palmas.c
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe-spi.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/wd.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/card/block.c
drivers/mmc/core/cd-gpio.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/atmel-mci-regs.h
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci.c
drivers/mtd/Kconfig
drivers/mtd/bcm63xxpart.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/cmdlinepart.c
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/spear_smi.c
drivers/mtd/lpddr/qinfo_probe.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/intel_vr_nor.c
drivers/mtd/maps/pci.c
drivers/mtd/maps/scb2_flash.c
drivers/mtd/maps/wr_sbc82xx_flash.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdoops.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/alauda.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/au1550nd.c
drivers/mtd/nand/bcm_umi_bch.c
drivers/mtd/nand/bcm_umi_nand.c
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/cs553x_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/docg4.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/mtd/nand/gpmi-nand/bch-regs.h
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
drivers/mtd/nand/h1910.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/pasemi_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/r852.c
drivers/mtd/nand/sh_flctl.c
drivers/mtd/nand/sm_common.c
drivers/mtd/onenand/onenand_base.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/caif/caif_hsi.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/flexcan.c
drivers/net/dummy.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/tile/Kconfig
drivers/net/ethernet/tile/Makefile
drivers/net/ethernet/tile/tilegx.c [new file with mode: 0644]
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/phy/icplus.c
drivers/net/phy/mdio-mux.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/usb/asix.c
drivers/net/usb/ipheth.c
drivers/net/usb/mcs7830.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/sierra_net.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/iwlwifi/iwl-phy-db.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-phy-db.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rtl818x/rtl8187/leds.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wl1251/acx.c
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wl1251/sdio.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/wireless/ti/wlcore/acx.c
drivers/net/wireless/ti/wlcore/acx.h
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/pn544_hci.c
drivers/of/platform.c
drivers/oprofile/oprofile_perf.c
drivers/pci/pci-driver.c
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-imx.c
drivers/pinctrl/pinctrl-imx6q.c
drivers/pinctrl/pinctrl-mxs.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-sirf.c
drivers/pinctrl/spear/pinctrl-spear.c
drivers/pinctrl/spear/pinctrl-spear.h
drivers/pinctrl/spear/pinctrl-spear1310.c
drivers/pinctrl/spear/pinctrl-spear1340.c
drivers/pinctrl/spear/pinctrl-spear300.c
drivers/pinctrl/spear/pinctrl-spear310.c
drivers/pinctrl/spear/pinctrl-spear320.c
drivers/pinctrl/spear/pinctrl-spear3xx.c
drivers/pinctrl/spear/pinctrl-spear3xx.h
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/acerhdf.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/fujitsu-tablet.c
drivers/platform/x86/hdaps.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/platform/x86/xo1-rfkill.c
drivers/power/Kconfig
drivers/power/ab8500_btemp.c
drivers/power/ab8500_charger.c
drivers/power/ab8500_fg.c
drivers/power/charger-manager.c
drivers/power/ds2781_battery.c
drivers/power/isp1704_charger.c
drivers/power/max17042_battery.c
drivers/power/power_supply_sysfs.c
drivers/power/sbs-battery.c
drivers/power/smb347-charger.c
drivers/rapidio/Kconfig
drivers/rapidio/devices/Makefile
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/rapidio/devices/tsi721_dma.c [new file with mode: 0644]
drivers/rapidio/rio.c
drivers/regulator/ab8500.c
drivers/regulator/anatop-regulator.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/gpio-regulator.c
drivers/regulator/max8649.c
drivers/regulator/palmas-regulator.c
drivers/regulator/s5m8767.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6524x-regulator.c
drivers/remoteproc/Kconfig
drivers/remoteproc/omap_remoteproc.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-ab8500.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-twl.c
drivers/s390/block/dasd_int.h
drivers/s390/char/sclp_sdias.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/fcoe/Makefile
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_sysfs.c [new file with mode: 0644]
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/Makefile
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_target.h [new file with mode: 0644]
drivers/scsi/qla2xxx/tcm_qla2xxx.c [new file with mode: 0644]
drivers/scsi/qla2xxx/tcm_qla2xxx.h [new file with mode: 0644]
drivers/scsi/qla4xxx/ql4_attr.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_wait_scan.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/spi/spi-omap2-mcspi.c
drivers/staging/comedi/drivers.c
drivers/staging/gdm72xx/netlink_k.c
drivers/staging/iio/Documentation/device.txt
drivers/staging/iio/adc/Kconfig
drivers/staging/iio/adc/ad7606_core.c
drivers/staging/media/lirc/lirc_serial.c
drivers/staging/omapdrm/omap_fbdev.c
drivers/staging/ramster/zcache-main.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/zcache/zcache-main.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/amiserial.c
drivers/tty/cyclades.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/n_r3964.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sh-sci.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/tty/tty_mutex.c
drivers/tty/tty_port.c
drivers/usb/Makefile
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_qe_udc.h
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/lpc32xx_udc.c
drivers/usb/gadget/mv_udc_core.c
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-sh.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/davinci.c
drivers/usb/musb/davinci.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/otg/twl6030-usb.c
drivers/usb/phy/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/metro-usb.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/scsiglue.c
drivers/vhost/vhost.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/auo_k1900fb.c [new file with mode: 0644]
drivers/video/auo_k1901fb.c [new file with mode: 0644]
drivers/video/auo_k190x.c [new file with mode: 0644]
drivers/video/auo_k190x.h [new file with mode: 0644]
drivers/video/backlight/Kconfig
drivers/video/backlight/ili9320.c
drivers/video/bfin_adv7393fb.c
drivers/video/broadsheetfb.c
drivers/video/cobalt_lcdfb.c
drivers/video/console/Kconfig
drivers/video/ep93xx-fb.c
drivers/video/exynos/exynos_dp_core.c
drivers/video/exynos/exynos_dp_core.h
drivers/video/exynos/exynos_dp_reg.c
drivers/video/exynos/exynos_dp_reg.h
drivers/video/exynos/exynos_mipi_dsi.c
drivers/video/exynos/exynos_mipi_dsi_common.c
drivers/video/exynos/s6e8ax0.c
drivers/video/fb_defio.c
drivers/video/fbsysfs.c
drivers/video/fsl-diu-fb.c
drivers/video/intelfb/intelfbdrv.c
drivers/video/matrox/matroxfb_maven.c
drivers/video/mb862xx/mb862xx-i2c.c
drivers/video/mb862xx/mb862xxfbdrv.c
drivers/video/mbx/mbxfb.c
drivers/video/mxsfb.c
drivers/video/omap/Kconfig
drivers/video/omap2/displays/panel-acx565akm.c
drivers/video/omap2/displays/panel-generic-dpi.c
drivers/video/omap2/displays/panel-n8x0.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/displays/panel-tfp410.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/Kconfig
drivers/video/omap2/dss/apply.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dispc.h
drivers/video/omap2/dss/display.c
drivers/video/omap2/dss/dpi.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/dss_features.c
drivers/video/omap2/dss/dss_features.h
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/hdmi_panel.c
drivers/video/omap2/dss/manager.c
drivers/video/omap2/dss/overlay.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/dss/ti_hdmi.h
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/omap2/omapfb/omapfb.h
drivers/video/omap2/vrfb.c
drivers/video/pxa3xx-gcu.c
drivers/video/s3c-fb.c
drivers/video/savage/savagefb_driver.c
drivers/video/sh_mobile_hdmi.c
drivers/video/sis/init.h
drivers/video/sis/sis_main.c
drivers/video/skeletonfb.c
drivers/video/smscufx.c
drivers/video/udlfb.c
drivers/video/via/viafbdev.c
drivers/virtio/virtio_balloon.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/da9052_wdt.c [new file with mode: 0644]
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/via_wdt.c
drivers/watchdog/watchdog_core.c
drivers/watchdog/watchdog_core.h [moved from drivers/watchdog/watchdog_dev.h with 79% similarity]
drivers/watchdog/watchdog_dev.c
drivers/xen/events.c
drivers/xen/pci.c
drivers/xen/tmem.c
fs/9p/vfs_inode_dotl.c
fs/affs/affs.h
fs/aio.c
fs/attr.c
fs/binfmt_elf.c
fs/binfmt_flat.c
fs/btrfs/acl.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/print-tree.c
fs/btrfs/rcu-string.h [new file with mode: 0644]
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/ulist.c
fs/btrfs/ulist.h
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/xattr.c
fs/buffer.c
fs/ceph/addr.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/ioctl.c
fs/ceph/ioctl.h
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/xattr.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/transport.c
fs/compat.c
fs/dcache.c
fs/direct-io.c
fs/ecryptfs/inode.c
fs/ecryptfs/kthread.c
fs/ecryptfs/miscdev.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/exofs/ore.c
fs/exofs/ore_raid.c
fs/exofs/sys.c
fs/exportfs/expfs.c
fs/ext4/Kconfig
fs/ext4/balloc.c
fs/ext4/bitmap.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/fat/dir.c
fs/fat/fat.h
fs/fat/fatent.c
fs/fat/inode.c
fs/fcntl.c
fs/fifo.c
fs/file_table.c
fs/fs-writeback.c
fs/fuse/control.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/export.c
fs/hfsplus/ioctl.c
fs/hfsplus/wrapper.c
fs/hpfs/alloc.c
fs/hpfs/anode.c
fs/hpfs/buffer.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/ea.c
fs/hpfs/hpfs.h
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/map.c
fs/hpfs/namei.c
fs/hpfs/super.c
fs/inode.c
fs/internal.h
fs/isofs/export.c
fs/jbd2/Kconfig
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/jffs2_fs_sb.h
fs/jffs2/nodemgmt.c
fs/jffs2/os-linux.h
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/jffs2/wbuf.c
fs/jffs2/xattr.c
fs/jffs2/xattr.h
fs/lockd/clntlock.c
fs/lockd/svc.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/ncpfs/file.c
fs/ncpfs/ncp_fs_sb.h
fs/nfs/callback.c
fs/nfs/callback_xdr.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/auth.c
fs/nfsd/export.c
fs/nfsd/fault_inject.c
fs/nfsd/idmap.h
fs/nfsd/netns.h
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsfh.c
fs/nfsd/nfssvc.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/nfsd/xdr4.h
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ioctl.c
fs/nilfs2/namei.c
fs/nilfs2/segment.c
fs/nls/Kconfig
fs/nls/Makefile
fs/nls/mac-celtic.c [new file with mode: 0644]
fs/nls/mac-centeuro.c [new file with mode: 0644]
fs/nls/mac-croatian.c [new file with mode: 0644]
fs/nls/mac-cyrillic.c [new file with mode: 0644]
fs/nls/mac-gaelic.c [new file with mode: 0644]
fs/nls/mac-greek.c [new file with mode: 0644]
fs/nls/mac-iceland.c [new file with mode: 0644]
fs/nls/mac-inuit.c [new file with mode: 0644]
fs/nls/mac-roman.c [new file with mode: 0644]
fs/nls/mac-romanian.c [new file with mode: 0644]
fs/nls/mac-turkish.c [new file with mode: 0644]
fs/notify/fsnotify.c
fs/ntfs/file.c
fs/ocfs2/blockcheck.c
fs/ocfs2/dlm/dlmast.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlmglue.c
fs/ocfs2/export.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/ioctl.c
fs/ocfs2/move_extents.c
fs/ocfs2/namei.c
fs/ocfs2/quota_global.c
fs/ocfs2/symlink.c
fs/ocfs2/symlink.h
fs/open.c
fs/pipe.c
fs/pnode.c
fs/proc/array.c
fs/proc/base.c
fs/proc/internal.h
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc_namespace.c
fs/pstore/inode.c
fs/pstore/platform.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/readdir.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/reiserfs.h
fs/reiserfs/resize.c
fs/reiserfs/super.c
fs/select.c
fs/signalfd.c
fs/splice.c
fs/statfs.c
fs/sync.c
fs/ubifs/debug.c
fs/ubifs/dir.c
fs/ubifs/find.c
fs/ubifs/sb.c
fs/udf/namei.c
fs/udf/super.c
fs/utimes.c
fs/xattr.c
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
include/acpi/acpi_bus.h
include/asm-generic/Kbuild
include/asm-generic/bitsperlong.h
include/asm-generic/bug.h
include/asm-generic/dma-contiguous.h
include/asm-generic/pgtable.h
include/asm-generic/posix_types.h
include/drm/drm_crtc.h
include/drm/drm_mem_util.h
include/drm/drm_pciids.h
include/drm/exynos_drm.h
include/linux/Kbuild
include/linux/aio.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/bsg-lib.h
include/linux/capability.h
include/linux/ceph/auth.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/decode.h
include/linux/ceph/messenger.h
include/linux/ceph/osd_client.h
include/linux/ceph/osdmap.h
include/linux/clockchips.h
include/linux/compaction.h
include/linux/compat.h
include/linux/compiler-gcc.h
include/linux/cpu.h
include/linux/cred.h
include/linux/crush/crush.h
include/linux/crush/mapper.h
include/linux/device.h
include/linux/dmaengine.h
include/linux/errno.h
include/linux/eventfd.h
include/linux/eventpoll.h
include/linux/exportfs.h
include/linux/fb.h
include/linux/frontswap.h [new file with mode: 0644]
include/linux/fs.h
include/linux/fsnotify_backend.h
include/linux/fuse.h
include/linux/genetlink.h
include/linux/gpio.h
include/linux/hrtimer.h
include/linux/i2c-mux-pinctrl.h [new file with mode: 0644]
include/linux/i2c.h
include/linux/init_task.h
include/linux/input.h
include/linux/interrupt.h
include/linux/ipc_namespace.h
include/linux/irq.h
include/linux/jbd2.h
include/linux/jbd_common.h
include/linux/kcmp.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/kexec.h
include/linux/key.h
include/linux/kmod.h
include/linux/kmsg_dump.h
include/linux/kvm_host.h
include/linux/lglock.h
include/linux/lockd/bind.h
include/linux/memblock.h
include/linux/mempool.h
include/linux/mlx4/device.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/sdhci-spear.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/moduleparam.h
include/linux/msdos_fs.h
include/linux/mtd/gpmi-nand.h
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter/xt_HMARK.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/nfsd/export.h
include/linux/pata_arasan_cf_data.h
include/linux/perf_event.h
include/linux/power/charger-manager.h
include/linux/power/max17042_battery.h
include/linux/power_supply.h
include/linux/prctl.h
include/linux/pstore_ram.h
include/linux/pxa2xx_ssp.h
include/linux/radix-tree.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/rio.h
include/linux/rio_drv.h
include/linux/rpmsg.h
include/linux/sched.h
include/linux/security.h
include/linux/signal.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/spi/pxa2xx_spi.h
include/linux/splice.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/svcauth.h
include/linux/sunrpc/svcauth_gss.h
include/linux/swap.h
include/linux/swapfile.h [new file with mode: 0644]
include/linux/swapops.h
include/linux/syscalls.h
include/linux/task_work.h [new file with mode: 0644]
include/linux/tcp.h
include/linux/thread_info.h
include/linux/tracehook.h
include/linux/tty.h
include/linux/types.h
include/linux/usb/hcd.h
include/linux/vga_switcheroo.h
include/linux/videodev2.h
include/linux/watchdog.h
include/net/bluetooth/hci.h
include/net/cipso_ipv4.h
include/net/dst.h
include/net/inetpeer.h
include/net/ip_vs.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/phonet/gprs.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
include/scsi/fcoe_sysfs.h [new file with mode: 0644]
include/scsi/libfcoe.h
include/scsi/libsas.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/sound/tea575x-tuner.h
include/target/target_core_fabric.h
include/trace/events/rcu.h
include/video/auo_k190xfb.h [new file with mode: 0644]
include/video/exynos_dp.h
include/video/exynos_mipi_dsim.h
include/video/omapdss.h
include/video/sh_mobile_hdmi.h
init/Kconfig
init/do_mounts.c
init/do_mounts_initrd.c
init/do_mounts_md.c
init/do_mounts_rd.c
init/initramfs.c
init/main.c
ipc/mq_sysctl.c
ipc/mqueue.c
ipc/shm.c
kernel/Makefile
kernel/cgroup.c
kernel/cpu.c
kernel/cpu_pm.c
kernel/cred.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/kcmp.c [new file with mode: 0644]
kernel/kmod.c
kernel/lglock.c [new file with mode: 0644]
kernel/panic.c
kernel/pid_namespace.c
kernel/power/hibernate.c
kernel/power/user.c
kernel/printk.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/relay.c
kernel/resource.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/signal.c
kernel/smpboot.c
kernel/sys.c
kernel/sys_ni.c
kernel/task_work.c [new file with mode: 0644]
kernel/time/clockevents.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/watchdog.c
lib/Kconfig.debug
lib/btree.c
lib/dma-debug.c
lib/dynamic_queue_limits.c
lib/fault-inject.c
lib/radix-tree.c
lib/raid6/recov.c
lib/raid6/recov_ssse3.c
lib/spinlock_debug.c
lib/vsprintf.c
mm/Kconfig
mm/Makefile
mm/bootmem.c
mm/cleancache.c
mm/compaction.c
mm/filemap.c
mm/filemap_xip.c
mm/frontswap.c [new file with mode: 0644]
mm/internal.h
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mempool.c
mm/migrate.c
mm/mmap.c
mm/mremap.c
mm/nobootmem.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_cgroup.c
mm/page_io.c
mm/pagewalk.c
mm/percpu-vm.c
mm/process_vm_access.c
mm/shmem.c
mm/slub.c
mm/sparse.c
mm/swapfile.c
mm/util.c
mm/vmscan.c
net/8021q/vlan.c
net/9p/protocol.c
net/9p/trans_virtio.c
net/appletalk/ddp.c
net/ax25/af_ax25.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/Kconfig
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/smp.c
net/bridge/br_if.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/caif/caif_dev.c
net/caif/caif_socket.c
net/can/raw.c
net/ceph/auth_none.c
net/ceph/auth_x.c
net/ceph/ceph_common.c
net/ceph/crush/crush.c
net/ceph/crush/mapper.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/drop_monitor.c
net/core/filter.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/skbuff.c
net/core/sock.c
net/ieee802154/dgram.c
net/ipv4/cipso_ipv4.c
net/ipv4/esp4.c
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ipmr.c
net/ipv4/tcp_ipv4.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/mac80211/agg-rx.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac802154/tx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nfnetlink.c
net/netfilter/xt_HMARK.c
net/netfilter/xt_set.c
net/netlink/genetlink.c
net/nfc/llcp/sock.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/phonet/af_phonet.c
net/phonet/datagram.c
net/phonet/pep-gprs.c
net/phonet/pep.c
net/phonet/pn_dev.c
net/phonet/pn_netlink.c
net/phonet/socket.c
net/phonet/sysctl.c
net/rds/ib.h
net/rxrpc/ar-peer.c
net/sched/sch_atm.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/rpc_pipe.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/wanrouter/Kconfig
net/wireless/ibss.c
net/wireless/reg.c
net/wireless/util.c
net/xfrm/xfrm_policy.c
scripts/checkpatch.pl
scripts/get_maintainer.pl
scripts/gfp-translate [changed mode: 0644->0755]
security/apparmor/lsm.c
security/capability.c
security/commoncap.c
security/keys/compat.c
security/keys/internal.h
security/keys/keyctl.c
security/keys/process_keys.c
security/keys/request_key.c
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
sound/core/compress_offload.c
sound/i2c/other/tea575x-tuner.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/Kconfig
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_auto_parser.h
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/fsl/imx-audmux.c
sound/soc/fsl/imx-ssi.c
sound/soc/pxa/pxa-ssp.c
sound/soc/sh/fsi.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/tegra/tegra30_ahub.c
sound/soc/tegra/tegra_wm8903.c
sound/usb/6fire/firmware.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/mixer_maps.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/stream.c
tools/hv/hv_kvp_daemon.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/parse-filter.c
tools/perf/Documentation/perfconfig.example
tools/perf/MANIFEST
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-evlist.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/design.txt
tools/perf/perf.h
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/setup.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/config.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/bitops.h
tools/perf/util/map.c
tools/perf/util/pager.c
tools/perf/util/parse-events.c
tools/perf/util/probe-event.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread_map.c
tools/perf/util/trace-event-parse.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/Makefile
tools/testing/selftests/kcmp/Makefile [new file with mode: 0644]
tools/testing/selftests/kcmp/kcmp_test.c [new file with mode: 0644]
tools/testing/selftests/mqueue/.gitignore [new file with mode: 0644]
tools/testing/selftests/mqueue/Makefile [new file with mode: 0644]
tools/testing/selftests/mqueue/mq_open_tests.c [new file with mode: 0644]
tools/testing/selftests/mqueue/mq_perf_tests.c [new file with mode: 0644]
usr/Kconfig
virt/kvm/assigned-dev.c
virt/kvm/eventfd.c
virt/kvm/irq_comm.c
virt/kvm/kvm_main.c

index 9b0d0267a3c3f1ea75a674fe858fac2165a8b683..658003aa94468687849d482a745fc61951d67e88 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -111,5 +111,8 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
index d535757799feda3d447c2ddbbed201794cea9279..beef30c046b0d3181bfc353d15704bb4bdaf3da6 100644 (file)
@@ -1,18 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Allocated
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:   This is a read-only file. Indicates the status of the device.
+Description:    This is a read-only file. Indicates the status of the device.
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
new file mode 100644 (file)
index 0000000..469d09c
--- /dev/null
@@ -0,0 +1,77 @@
+What:          /sys/bus/fcoe/ctlr_X
+Date:          March 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   'FCoE Controller' instances on the fcoe bus
+Attributes:
+
+       fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
+                         this value will change the dev_loss_tmo for all
+                         FCFs discovered by this controller.
+
+       lesb_link_fail:   Link Error Status Block (LESB) link failure count.
+
+       lesb_vlink_fail:  Link Error Status Block (LESB) virtual link
+                         failure count.
+
+       lesb_miss_fka:    Link Error Status Block (LESB) missed FCoE
+                         Initialization Protocol (FIP) Keep-Alives (FKA).
+
+       lesb_symb_err:    Link Error Status Block (LESB) symbolic error count.
+
+       lesb_err_block:   Link Error Status Block (LESB) block error count.
+
+       lesb_fcs_error:   Link Error Status Block (LESB) Fibre Channel
+                         Services error count.
+
+Notes: ctlr_X (global increment starting at 0)
+
+What:          /sys/bus/fcoe/fcf_X
+Date:          March 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   'FCoE FCF' instances on the fcoe bus. A FCF is a Fibre Channel
+               Forwarder, which is a FCoE switch that can accept FCoE
+               (Ethernet) packets, unpack them, and forward the embedded
+               Fibre Channel frames into a FC fabric. It can also take
+               outbound FC frames and pack them in Ethernet packets to
+               be sent to their destination on the Ethernet segment.
+Attributes:
+
+       fabric_name: Identifies the fabric that the FCF services.
+
+       switch_name: Identifies the FCF.
+
+       priority:    The switch's priority amongst other FCFs on the same
+                    fabric.
+
+       selected:    1 indicates that the switch has been selected for use;
+                    0 indicates that the switch will not be used.
+
+       fc_map:      The Fibre Channel MAP
+
+       vfid:        The Virtual Fabric ID
+
+       mac:         The FCF's MAC address
+
+       fka_peroid:  The FIP Keep-Alive period
+
+       fabric_state: The internal kernel state
+                     "Unknown" - Initialization value
+                     "Disconnected" - No link to the FCF/fabric
+                     "Connected" - Host is connected to the FCF
+                     "Deleted" - FCF is being removed from the system
+
+       dev_loss_tmo: The device loss timeout period for this FCF.
+
+Notes: A device loss infrastructure similar to the FC Transport's
+       is present in fcoe_sysfs. It is useful so that a
+       link-flapping adapter doesn't continually advance the count
+       used to identify the discovered FCF. FCFs will exist in a
+       "Disconnected" state until either the timer expires and the
+       FCF becomes "Deleted" or the FCF is rediscovered and becomes
+       "Connected."
+
+
+Users: The first user of this interface will be the fcoeadm application,
+       which is commonly packaged in the fcoe-utils package.
index 5bc8a476c15ed5030d08d11778e6add865a89f9c..cfedf63cce151d2bdb9237ed58726a136f4cc065 100644 (file)
@@ -219,6 +219,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
@@ -273,6 +274,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
 What:          /sys/.../iio:deviceX/in_voltageX_scale_available
 What:          /sys/.../iio:deviceX/in_voltage-voltage_scale_available
 What:          /sys/.../iio:deviceX/out_voltageX_scale_available
+What:          /sys/.../iio:deviceX/out_altvoltageX_scale_available
 What:          /sys/.../iio:deviceX/in_capacitance_scale_available
 KernelVersion: 2.635
 Contact:       linux-iio@vger.kernel.org
@@ -298,14 +300,19 @@ Description:
                gives the 3dB frequency of the filter in Hz.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
                Raw (unscaled, no bias etc.) output voltage for
                channel Y.  The number must always be specified and
                unique if the output corresponds to a single channel.
+               While DAC-like devices typically use out_voltage,
+               a continuous frequency generating device, such as
+               a DDS or PLL, should use out_altvoltage.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY&Z_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -316,6 +323,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown_mode
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -330,6 +339,8 @@ Description:
 
 What:          /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
 What:          /sys/.../iio:deviceX/out_voltage_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvotlageY_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -338,6 +349,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -346,6 +359,24 @@ Description:
                normal operation. Y may be suppressed if all outputs are
                controlled together.
 
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Output frequency for channel Y in Hz. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Phase in radians of one frequency/clock output Y
+               (out_altvoltageY) relative to another frequency/clock output
+               (out_altvoltageZ) of the device X. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
 What:          /sys/bus/iio/devices/iio:deviceX/events
 KernelVersion: 2.6.35
 Contact:       linux-iio@vger.kernel.org
index dbedafb095e24d3d3a8e2d93b6cbd727268d1754..bcd88eb7ebcd240abcdbe0ef34f4a517f0f404bd 100644 (file)
@@ -65,11 +65,11 @@ snap_*
 Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
 -------------------------------------------------------------
 
-id
+snap_id
 
        The rados internal snapshot id assigned for this snapshot
 
-size
+snap_size
 
        The size of the image when this snapshot was taken.
 
index 4d55a18889813be097af27112b9e647031bf8fff..938ef71e2035e7c04203c7b65d2a120dbd96944a 100644 (file)
@@ -123,3 +123,55 @@ Description:
                half page, or a quarter page).
 
                In the case of ECC NOR, it is the ECC block size.
+
+What:          /sys/class/mtd/mtdX/ecc_strength
+Date:          April 2012
+KernelVersion: 3.4
+Contact:       linux-mtd@lists.infradead.org
+Description:
+               Maximum number of bit errors that the device is capable of
+               correcting within each region covering an ecc step.  This will
+               always be a non-negative integer.  Note that some devices will
+               have multiple ecc steps within each writesize region.
+
+               In the case of devices lacking any ECC capability, it is 0.
+
+What:          /sys/class/mtd/mtdX/bitflip_threshold
+Date:          April 2012
+KernelVersion: 3.4
+Contact:       linux-mtd@lists.infradead.org
+Description:
+               This allows the user to examine and adjust the criteria by which
+               mtd returns -EUCLEAN from mtd_read() and mtd_read_oob().  If the
+               maximum number of bit errors that were corrected on any single
+               region comprising an ecc step (as reported by the driver) equals
+               or exceeds this value, -EUCLEAN is returned.  Otherwise, absent
+               an error, 0 is returned.  Higher layers (e.g., UBI) use this
+               return code as an indication that an erase block may be
+               degrading and should be scrutinized as a candidate for being
+               marked as bad.
+
+               The initial value may be specified by the flash device driver.
+               If not, then the default value is ecc_strength.
+
+               The introduction of this feature brings a subtle change to the
+               meaning of the -EUCLEAN return code.  Previously, it was
+               interpreted to mean simply "one or more bit errors were
+               corrected".  Its new interpretation can be phrased as "a
+               dangerously high number of bit errors were corrected on one or
+               more regions comprising an ecc step".  The precise definition of
+               "dangerously high" can be adjusted by the user with
+               bitflip_threshold.  Users are discouraged from doing this,
+               however, unless they know what they are doing and have intimate
+               knowledge of the properties of their device.  Broadly speaking,
+               bitflip_threshold should be low enough to detect genuine erase
+               block degradation, but high enough to avoid the consequences of
+               a persistent return value of -EUCLEAN on devices where sticky
+               bitflips occur.  Note that if bitflip_threshold exceeds
+               ecc_strength, -EUCLEAN is never returned by the read operations.
+               Conversely, if bitflip_threshold is zero, -EUCLEAN is always
+               returned, absent a hard error.
+
+               This is generally applicable only to NAND flash devices with ECC
+               capability.  It is ignored on devices lacking ECC capability;
+               i.e., devices for which ecc_strength is zero.
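The -EUCLEAN rule above comes down to a single comparison against bitflip_threshold. The following user-space C sketch only illustrates that rule; it is not the actual mtd read path, and the function and parameter names are invented for this example.

	#include <errno.h>
	#include <stdio.h>

	/*
	 * Sketch of the -EUCLEAN rule described above (not the real mtd code).
	 * max_bitflips: worst corrected bit-error count of any single ecc step
	 * threshold:    the bitflip_threshold value
	 */
	static int classify_read(unsigned int max_bitflips, unsigned int threshold,
				 int hard_error)
	{
		if (hard_error)
			return -EBADMSG;	/* uncorrectable data */
		if (max_bitflips >= threshold)
			return -EUCLEAN;	/* corrected, but the block may be degrading */
		return 0;			/* clean read */
	}

	int main(void)
	{
		/* 3 corrected bitflips against a threshold of 4 is still a clean read */
		printf("%d %d\n", classify_read(3, 4, 0), classify_read(4, 4, 0));
		return 0;
	}

Run, this prints 0 for the read that stays below the threshold and -117 (-EUCLEAN) for the read that reaches it.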
index c58b236bbe0467938e601e498008d4856bbbce52..cb9258b8fd35b25b8ac750b18b4237204213fbd4 100644 (file)
@@ -671,8 +671,9 @@ ones already enabled by DEBUG.
                Chapter 14: Allocating memory
 
 The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc().  Please refer to
-the API documentation for further information about them.
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc().  Please refer to the API documentation for further information
+about them.
 
 The preferred form for passing a size of a struct is the following:
 
@@ -686,6 +687,17 @@ Casting the return value which is a void pointer is redundant. The conversion
 from void pointer to any other pointer type is guaranteed by the C programming
 language.
 
+The preferred form for allocating an array is the following:
+
+       p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+       p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
+
 
                Chapter 15: The inline disease
 
index 676bc46f9c52a476b3c8c280cca437d756726235..cda0dfb6769aee5d0fcadb8a30b4d19658259bc7 100644 (file)
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
            from RGB to Y'CbCr color space.
            </entry>
          </row>
-         <row id = "v4l2-jpeg-chroma-subsampling">
+         <row>
            <entrytbl spanname="descr" cols="2">
              <tbody valign="top">
                <row>
index f5ac15ed0549f21c009e5adaceb05eb22449bac9..e58934c92895f159fc3200946a0d75ed50434e84 100644 (file)
@@ -986,13 +986,13 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
          <row id="V4L2-PIX-FMT-Y4">
            <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
            <entry>'Y04 '</entry>
-           <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+           <entry>Old 4-bit greyscale format. Only the most significant 4 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
          <row id="V4L2-PIX-FMT-Y6">
            <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
            <entry>'Y06 '</entry>
-           <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+           <entry>Old 6-bit greyscale format. Only the most significant 6 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
        </tbody>
index 015c561754b7cd1ce30817ac66e67a5acf049221..008c2d73a484622d8343752391fd5312c3ff2793 100644 (file)
@@ -560,6 +560,7 @@ and discussions on the V4L mailing list.</revremark>
     &sub-g-tuner;
     &sub-log-status;
     &sub-overlay;
+    &sub-prepare-buf;
     &sub-qbuf;
     &sub-querybuf;
     &sub-querycap;
@@ -567,7 +568,6 @@ and discussions on the V4L mailing list.</revremark>
     &sub-query-dv-preset;
     &sub-query-dv-timings;
     &sub-querystd;
-    &sub-prepare-buf;
     &sub-reqbufs;
     &sub-s-hw-freq-seek;
     &sub-streamon;
index 765549ff8a71172477d04c9e7d633c4a1282f31b..a2474ecb574acd06c533f7a22bd4c5fbccacc4c6 100644 (file)
@@ -108,10 +108,9 @@ information.</para>
 /></entry>
          </row>
          <row>
-           <entry>__u32</entry>
+           <entry>struct&nbsp;v4l2_format</entry>
            <entry><structfield>format</structfield></entry>
-           <entry>Filled in by the application, preserved by the driver.
-           See <xref linkend="v4l2-format" />.</entry>
+           <entry>Filled in by the application, preserved by the driver.</entry>
          </row>
          <row>
            <entry>__u32</entry>
index e8714aa1643343de973e32f5abfa5e331b26bb7d..98a856f9ec30123b4c2788247d9e5176f2990efa 100644 (file)
@@ -89,7 +89,7 @@
          <row>
            <entry></entry>
            <entry>&v4l2-event-frame-sync;</entry>
-            <entry><structfield>frame</structfield></entry>
+            <entry><structfield>frame_sync</structfield></entry>
            <entry>Event data for event V4L2_EVENT_FRAME_SYNC.</entry>
          </row>
          <row>
index e3d5afcdafbb5b03cf9994478e3e0d8139442dda..0a4b90fcf2dab77b36ed1e52749040ddcc97a113 100644 (file)
@@ -284,13 +284,6 @@ These controls are described in <xref
            processing controls. These controls are described in <xref
            linkend="image-process-controls" />.</entry>
          </row>
-         <row>
-           <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
-           <entry>0x9d0000</entry>
-           <entry>The class containing JPEG compression controls.
-These controls are described in <xref
-               linkend="jpeg-controls" />.</entry>
-         </row>
        </tbody>
       </tgroup>
     </table>
index 0c674be0d3c6de3d05e1903f6fac4beecff51133..e0aedb7a782718c445d48f8f42ee9e14a3d5a33a 100644 (file)
@@ -1119,8 +1119,6 @@ in this page</entry>
                These constants are defined in nand.h. They are ored together to describe
                the chip functionality.
                <programlisting>
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR       0x00000001
 /* Buswitdh is 16 bit */
 #define NAND_BUSWIDTH_16       0x00000002
 /* Device supports partial programming without padding */
index 888ae7b83ae4783da38b4db2f69b94d45ba193c8..a564ceea9e98cc9f5f423f0e80f54c85fe7b0ef3 100644 (file)
@@ -47,6 +47,51 @@ flexible way to enable non-common multi-display configuration. In addition to
 modelling the hardware overlays, omapdss supports virtual overlays and overlay
 managers. These can be used when updating a display with CPU or system DMA.
 
+omapdss driver support for audio
+--------------------------------
+There exist several display technologies and standards that support audio as
+well. Hence, it is relevant to update the DSS device driver to provide an audio
+interface that may be used by an audio driver or any other driver interested in
+the functionality.
+
+The audio_enable function is intended to prepare the relevant
+IP for playback (e.g., enabling an audio FIFO, taking in/out of reset
+some IP, enabling companion chips, etc). It is intended to be called before
+audio_start. The audio_disable function performs the reverse operation and is
+intended to be called after audio_stop.
+
+While a given DSS device driver may support audio, it is possible that for
+certain configurations audio is not supported (e.g., an HDMI display using a
+VESA video timing). The audio_supported function is intended to query whether
+the current configuration of the display supports audio.
+
+The audio_config function is intended to configure all the relevant audio
+parameters of the display. In order to make the function independent of any
+specific DSS device driver, a struct omap_dss_audio is defined. Its purpose
+is to contain all the required parameters for audio configuration. At the
+moment, such structure contains pointers to IEC-60958 channel status word
+and CEA-861 audio infoframe structures. This should be enough to support
+HDMI and DisplayPort, as both are based on CEA-861 and IEC-60958.
+
+The audio_enable/disable, audio_config and audio_supported functions could be
+implemented as functions that may sleep. Hence, they should not be called
+while holding a spinlock or a readlock.
+
+The audio_start/audio_stop functions are intended to start/stop audio
+playback after the configuration has taken place. These functions are designed
+to be used in an atomic context. Hence, audio_start should return quickly and be
+called only after all the needed resources for audio playback (audio FIFOs,
+DMA channels, companion chips, etc) have been enabled to begin data transfers.
+audio_stop is designed to only stop the audio transfers. The resources used
+for playback are released using audio_disable.
+
+The enum omap_dss_audio_state may be used to help the implementations of
+the interface to keep track of the audio state. The initial state is _DISABLED;
+then, the state transitions to _CONFIGURED, and then, when it is ready to
+play audio, to _ENABLED. The state _PLAYING is used when the audio is being
+rendered.
+
+
 Panel and controller drivers
 ----------------------------
 
@@ -156,6 +201,7 @@ timings             Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
                "pal" and "ntsc"
 panel_name
 tear_elim      Tearing elimination 0=off, 1=on
+output_type    Output type (video encoder only): "composite" or "svideo"
 
 There are also some debugfs files at <debugfs>/omapdss/ which show information
 about clocks and registers.
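The audio interface described in the omapdss section above maps onto a small set of callbacks plus a state enum. The sketch below only illustrates that shape; the type and member names are assumptions, not the real omapdss definitions.

	/* Illustrative shape only -- not the real omapdss definitions. */
	struct omap_dss_audio;			/* IEC-60958 status word + CEA-861 infoframe */

	enum dss_audio_state {
		DSS_AUDIO_DISABLED,		/* initial state */
		DSS_AUDIO_CONFIGURED,		/* after audio_config() */
		DSS_AUDIO_ENABLED,		/* after audio_enable(), ready to play */
		DSS_AUDIO_PLAYING,		/* after audio_start() */
	};

	struct dss_audio_ops {
		int  (*audio_supported)(void);				/* may sleep */
		int  (*audio_config)(struct omap_dss_audio *cfg);	/* may sleep */
		int  (*audio_enable)(void);				/* may sleep */
		void (*audio_disable)(void);				/* may sleep */
		int  (*audio_start)(void);				/* atomic context */
		void (*audio_stop)(void);				/* atomic context */
	};

A driver following the description would move from DSS_AUDIO_DISABLED to _CONFIGURED via audio_config(), to _ENABLED via audio_enable(), and to _PLAYING via audio_start(), releasing resources again with audio_stop() and audio_disable().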
index 57aae7765c74e7a7ed60b51bd243ee94193b7c80..65610bf52ebffbad7e13d8f522df603487ef59e2 100644 (file)
@@ -60,4 +60,4 @@ Introduction
   Document Author
   ---------------
 
-  Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics
+  Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
index d8147b336c354e203addd40bb85bcc0abbeeded6..6518a55273e7094f62f84a5d83467fd96b26fd26 100644 (file)
@@ -38,6 +38,13 @@ read or write requests. Note that the total allocated number may be twice
 this amount, since it applies only to reads or writes (not the accumulated
 sum).
 
+To avoid priority inversion through request starvation, a request
+queue maintains a separate request pool per each cgroup when
+CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such
+per-block-cgroup request pool.  IOW, if there are N block cgroups,
+each request queue may have upto N request pools, each independently
+regulated by nr_requests.
+
 read_ahead_kb (RW)
 ------------------
 Maximum number of kilobytes to read-ahead for filesystems on this block
index 3370bc4d7b9885b45040b359d858f4112418b74f..f5cfc62b7ad3fa2bfbb50fb3f33339e1d402a575 100644 (file)
@@ -287,6 +287,17 @@ iii) Messages
        the current transaction id is when you change it with this
        compare-and-swap message.
 
+    reserve_metadata_snap
+
+        Reserve a copy of the data mapping btree for use by userland.
+        This allows userland to inspect the mappings as they were when
+        this message was executed.  Use the pool's status command to
+        get the root block associated with the metadata snapshot.
+
+    release_metadata_snap
+
+        Release a previously reserved copy of the data mapping btree.
+
 'thin' target
 -------------
 
index 32e48797a14f80de6ebff7a441e489021bfce58a..9884681535ee36bf03a7d7baaa54abb36360d53d 100644 (file)
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
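As a worked example of the hash-tree geometry described above (each hash block holds hash_block_size / digest_size hashes), the short C program below reproduces the sha256 / 32768-block / 4096-byte layout; it is purely illustrative and not part of veritysetup.

	#include <stdio.h>

	int main(void)
	{
		unsigned long blocks = 32768;	/* num_data_blocks              */
		unsigned long fanout = 4096/32;	/* hash_block_size / digest len */
		int level = 0;

		/* Each level hashes the blocks of the level below it. */
		while (blocks > 1) {
			blocks = (blocks + fanout - 1) / fanout; /* ceiling division */
			printf("level %d: %lu hash block(s)\n", level++, blocks);
		}
		return 0;
	}

It prints 256, 2 and 1 hash blocks for levels 0, 1 and 2, i.e. a three-level tree over the 32768 data blocks.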
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644 (file)
index 0000000..ae8af16
--- /dev/null
@@ -0,0 +1,93 @@
+Pinctrl-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses pin multiplexing to
+route the I2C signals, and represents the pin multiplexing configuration
+using the pinctrl device tree bindings.
+
+                                 +-----+  +-----+
+                                 | dev |  | dev |
+    +------------------------+   +-----+  +-----+
+    | SoC                    |      |        |
+    |                   /----|------+--------+
+    |   +---+   +------+     | child bus A, on first set of pins
+    |   |I2C|---|Pinmux|     |
+    |   +---+   +------+     | child bus B, on second set of pins
+    |                   \----|------+--------+--------+
+    |                        |      |        |        |
+    +------------------------+  +-----+  +-----+  +-----+
+                                | dev |  | dev |  | dev |
+                                +-----+  +-----+  +-----+
+
+Required properties:
+- compatible: i2c-mux-pinctrl
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+
+Also required are:
+
+* Standard pinctrl properties that specify the pin mux state for each child
+  bus. See ../pinctrl/pinctrl-bindings.txt.
+
+* Standard I2C mux properties. See mux.txt in this directory.
+
+* I2C child bus nodes. See mux.txt in this directory.
+
+For each named state defined in the pinctrl-names property, an I2C child bus
+will be created. I2C child bus numbers are assigned based on the index into
+the pinctrl-names property.
+
+The only exception is that no bus will be created for a state named "idle". If
+such a state is defined, it must be the last entry in pinctrl-names. For
+example:
+
+       pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
+       pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
+       pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)
+
+Whenever an access is made to a device on a child bus, the relevant pinctrl
+state will be programmed into hardware.
+
+If an idle state is defined, whenever an access is not being made to a device
+on a child bus, the idle pinctrl state will be programmed into hardware.
+
+If an idle state is not defined, the most recently used pinctrl state will be
+left programmed into hardware whenever no access is being made of a device on
+a child bus.
+
+Example:
+
+       i2cmux {
+               compatible = "i2c-mux-pinctrl";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               i2c-parent = <&i2c1>;
+
+               pinctrl-names = "ddc", "pta", "idle";
+               pinctrl-0 = <&state_i2cmux_ddc>;
+               pinctrl-1 = <&state_i2cmux_pta>;
+               pinctrl-2 = <&state_i2cmux_idle>;
+
+               i2c@0 {
+                       reg = <0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+
+               i2c@1 {
+                       reg = <1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+       };
+
index a00c94ccbdeeb871c440b770de814d11c6f757d7..0b96e5737d3a569ca4acfec137cc6870d1b56880 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
 
 Example:
 
index 19f6af47a792986c23a9ed033e15e8639ec5c51b..baf07987ae6863c68b39efa5ba1227a327009337 100644 (file)
@@ -46,8 +46,8 @@ Examples:
 
 ecspi@70010000 { /* ECSPI1 */
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+                  <&gpio4 25 0>; /* GPIO4_25 */
        status = "okay";
 
        pmic: mc13892@0 {
index c7e404b3ef0515b5527583cae877ab48c5d69595..fea541ee8b34a18fbf1aaef9276c17ef992db830 100644 (file)
@@ -29,6 +29,6 @@ esdhc@70008000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70008000 0x4000>;
        interrupts = <2>;
-       cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
-       wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+       cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+       wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
 };
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
new file mode 100644 (file)
index 0000000..1a5bbd3
--- /dev/null
@@ -0,0 +1,33 @@
+* Freescale General-Purpose Media Interface (GPMI)
+
+The GPMI NAND controller provides an interface to control
+NAND flash chips. Only one NAND chip is currently supported.
+
+Required properties:
+  - compatible : should be "fsl,<chip>-gpmi-nand"
+  - reg : should contain registers location and length for gpmi and bch.
+  - reg-names: Should contain the reg names "gpmi-nand" and "bch"
+  - interrupts : The first is the DMA interrupt number for GPMI.
+                 The second is the BCH interrupt number.
+  - interrupt-names : The interrupt names "gpmi-dma", "bch";
+  - fsl,gpmi-dma-channel : Should contain the dma channel it uses.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Examples:
+
+gpmi-nand@8000c000 {
+       compatible = "fsl,imx28-gpmi-nand";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       reg = <0x8000c000 2000>, <0x8000a000 2000>;
+       reg-names = "gpmi-nand", "bch";
+       interrupts = <88>, <41>;
+       interrupt-names = "gpmi-dma", "bch";
+       fsl,gpmi-dma-channel = <4>;
+
+       partition@0 {
+       ...
+       };
+};
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.txt b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
new file mode 100644 (file)
index 0000000..b5833d1
--- /dev/null
@@ -0,0 +1,19 @@
+* Freescale's mxc_nand
+
+Required properties:
+- compatible: "fsl,imxXX-nand"
+- reg: address range of the nfc block
+- interrupts: irq to be used
+- nand-bus-width: see nand.txt
+- nand-ecc-mode: see nand.txt
+- nand-on-flash-bbt: see nand.txt
+
+Example:
+
+       nand@d8000000 {
+               compatible = "fsl,imx27-nand";
+               reg = <0xd8000000 0x1000>;
+               interrupts = <29>;
+               nand-bus-width = <8>;
+               nand-ecc-mode = "hw";
+       };
index 7ab9e1a2d8bec19fac2283b5703fae60d2858998..4616fc28ee86e83793550a0ceda9fa25e4b46167 100644 (file)
@@ -19,6 +19,6 @@ ethernet@83fec000 {
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
 };
index 82b43f9158579cdf94b4134ab59d111f45c08a11..a4119f6422d9527e1a5e50d672c70b62aa9d0ad3 100644 (file)
@@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11               1587
 MX6Q_PAD_SD2_DAT3__GPIO_1_12                   1588
 MX6Q_PAD_SD2_DAT3__SJC_DONE                    1589
 MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3              1590
+MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID          1591
+MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID              1592
index 9841057d112bb6e4c46299166ef7c5a9a6da5521..4256a6df9b79355d8c17f5f393c3956d6a5a78ca 100644 (file)
@@ -17,6 +17,6 @@ ecspi@70010000 {
        reg = <0x70010000 0x4000>;
        interrupts = <36>;
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+                  <&gpio3 25 0>; /* GPIO3_25 */
 };
index 6eab91747a86a03c45c60d9bdca74fb28215b3a3..db4d3af3643c407ffad6e4ead2dd81d8c4ab36cf 100644 (file)
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
+ad     Avionic Design GmbH
 adi    Analog Devices, Inc.
 amcc   Applied Micro Circuits Corporation (APM, formally AMCC)
 apm    Applied Micro Circuits Corporation (APM)
index ebaffe208ccb73e0727879ab1806830b5a66b5dc..56000b33340bbe33f0b141934c756470d32ecbbd 100644 (file)
@@ -606,3 +606,9 @@ Why:        There are two mci drivers: at91-mci and atmel-mci. The PDC support
 Who:   Ludovic Desroches <ludovic.desroches@atmel.com>
 
 ----------------------------
+
+What:  net/wanrouter/
+When:  June 2013
+Why:   Unsupported/unmaintained/unused since 2.6
+
+----------------------------
index d449e632e6a09fca5ca0fedd6bdc2cf87eb44a96..8e2da1e06e3b2371eb82ef07105e63ad97d224b6 100644 (file)
@@ -61,6 +61,7 @@ ata *);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
+       void (*update_time)(struct inode *, struct timespec *, int);
 
 locking rules:
        all may block
@@ -87,6 +88,8 @@ getxattr:     no
 listxattr:     no
 removexattr:   yes
 fiemap:                no
+update_time:   no
+
        Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
        cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
index 912af6ce56269223f02899cd3856935f8443ce89..fb0a6aeb936c86237fe19bcdf630339fc43ad348 100644 (file)
@@ -40,6 +40,7 @@ Table of Contents
   3.4  /proc/<pid>/coredump_filter - Core dump filtering settings
   3.5  /proc/<pid>/mountinfo - Information about mounts
   3.6  /proc/<pid>/comm  & /proc/<pid>/task/<tid>/comm
+  3.7   /proc/<pid>/task/<tid>/children - Information about task children
 
   4    Configuring procfs
   4.1  Mount options
@@ -310,6 +311,11 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
   start_data    address above which program data+bss is placed
   end_data      address below which program data+bss is placed
   start_brk     address above which program heap can be expanded with brk()
+  arg_start     address above which program command line is placed
+  arg_end       address below which program command line is placed
+  env_start     address above which program environment is placed
+  env_end       address below which program environment is placed
+  exit_code     the thread's exit_code in the form reported by the waitpid system call
 ..............................................................................
 
 The /proc/PID/maps file containing the currently mapped memory regions and
@@ -1578,6 +1584,23 @@ then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
 comm value.
 
 
+3.7    /proc/<pid>/task/<tid>/children - Information about task children
+-------------------------------------------------------------------------
+This file provides a fast way to retrieve the first-level children pids
+of the task pointed to by the <pid>/<tid> pair. The format is a
+space-separated stream of pids.
+
+Note the "first level" here -- if a child has its own children, they will
+not be listed here; one needs to read /proc/<children-pid>/task/<tid>/children
+to obtain the descendants.
+
+Since this interface is intended to be fast and cheap, it doesn't
+guarantee precise results: some children might be skipped, especially
+if they exited right after their pids were printed. If precise results
+are needed, stop or freeze the processes being inspected.
+
+
 ------------------------------------------------------------------------------
 Configuring procfs
 ------------------------------------------------------------------------------
index ef19f91a0f12021f5c003f6084711e6b3b4897cc..efd23f4817044ac9d55932bd9476d309a02918dc 100644 (file)
@@ -363,6 +363,7 @@ struct inode_operations {
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
+       void (*update_time)(struct inode *, struct timespec *, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -471,6 +472,9 @@ otherwise noted.
   removexattr: called by the VFS to remove an extended attribute from
        a file. This method is called by removexattr(2) system call.
 
+  update_time: called by the VFS to update a specific time or the i_version of
+       an inode.  If this is not defined, the VFS will update the inode itself
+       and call mark_inode_dirty_sync.
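For illustration only, a filesystem's ->update_time could look roughly like the sketch below ("myfs" is a hypothetical name); it is kernel-side code and simply mirrors what the VFS does by default when the method is absent:

#include <linux/fs.h>

static void myfs_update_time(struct inode *inode, struct timespec *time, int flags)
{
	/* Apply only the timestamps the VFS asked for, then mark dirty. */
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	mark_inode_dirty_sync(inode);
}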
 
 The Address Space Object
 ========================
index 84d46c0c71a37d627a5773e90172987481edc1a2..c86b50c03ea8f02276d2a35deb4a6f5822d78be9 100644 (file)
@@ -6,7 +6,9 @@ Supported chips:
     Prefix: 'coretemp'
     CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
                               0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
-                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
+                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+                              0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+                              0x36 (Cedar Trail Atom)
     Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
                Volume 3A: System Programming Guide
                http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -52,6 +54,17 @@ Some information comes from ark.intel.com
 
 Process                Processor                                       TjMax(C)
 
+22nm           Core i5/i7 Processors
+               i7 3920XM, 3820QM, 3720QM, 3667U, 3520M         105
+               i5 3427U, 3360M/3320M                           105
+               i7 3770/3770K                                   105
+               i5 3570/3570K, 3550, 3470/3450                  105
+               i7 3770S                                        103
+               i5 3570S/3550S, 3475S/3470S/3450S               103
+               i7 3770T                                        94
+               i5 3570T                                        94
+               i5 3470T                                        91
+
 32nm           Core i3/i5/i7 Processors
                i7 660UM/640/620, 640LM/620, 620M, 610E         105
                i5 540UM/520/430, 540M/520/450/430              105
@@ -65,6 +78,11 @@ Process              Processor                                       TjMax(C)
                U3400                                           105
                P4505/P4500                                     90
 
+32nm           Atom Processors
+               Z2460                                           90
+               D2700/2550/2500                                 100
+               N2850/2800/2650/2600                            100
+
 45nm           Xeon Processors 5400 Quad-Core
                X5492, X5482, X5472, X5470, X5460, X5450        85
                E5472, E5462, E5450/40/30/20/10/05              85
@@ -85,6 +103,8 @@ Process              Processor                                       TjMax(C)
                N475/470/455/450                                100
                N280/270                                        90
                330/230                                         125
+               E680/660/640/620                                90
+               E680T/660T/640T/620T                            110
 
 45nm           Core2 Processors
                Solo ULV SU3500/3300                            100
index 42c17c1fb3cdf74e25a12e11d4416dd41e5f1f9a..b0ff2ab596ce56d4082850d19cc446a6d0c2782e 100644 (file)
@@ -18,9 +18,9 @@ For the most up-to-date list of functionality constants, please check
                                   adapters typically can not do these)
   I2C_FUNC_10BIT_ADDR             Handles the 10-bit address extensions
   I2C_FUNC_PROTOCOL_MANGLING      Knows about the I2C_M_IGNORE_NAK,
-                                  I2C_M_REV_DIR_ADDR, I2C_M_NOSTART and
-                                  I2C_M_NO_RD_ACK flags (which modify the
-                                  I2C protocol!)
+                                  I2C_M_REV_DIR_ADDR and I2C_M_NO_RD_ACK
+                                  flags (which modify the I2C protocol!)
+  I2C_FUNC_NOSTART                Can skip repeated start sequence
   I2C_FUNC_SMBUS_QUICK            Handles the SMBus write_quick command
   I2C_FUNC_SMBUS_READ_BYTE        Handles the SMBus read_byte command
   I2C_FUNC_SMBUS_WRITE_BYTE       Handles the SMBus write_byte command
@@ -50,6 +50,9 @@ A few combinations of the above flags are also defined for your convenience:
                                   emulated by a real I2C adapter (using
                                   the transparent emulation layer)
 
+In kernel versions prior to 3.5, I2C_FUNC_NOSTART was implemented as
+part of I2C_FUNC_PROTOCOL_MANGLING.
+
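An illustrative user-space check for this capability, assuming a placeholder adapter node /dev/i2c-0 (the fallback defines are only needed if the installed headers predate these constants):

/* i2c_funcs.c -- report whether an adapter can skip repeated start */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c-dev.h>

#ifndef I2C_FUNC_PROTOCOL_MANGLING
#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004
#endif
#ifndef I2C_FUNC_NOSTART
#define I2C_FUNC_NOSTART 0x00000010	/* present in 3.5+ headers */
#endif

int main(void)
{
	unsigned long funcs = 0;
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_FUNCS, &funcs) < 0) {
		perror("i2c");
		return 1;
	}
	printf("I2C_FUNC_NOSTART:           %s\n",
	       (funcs & I2C_FUNC_NOSTART) ? "yes" : "no");
	printf("I2C_FUNC_PROTOCOL_MANGLING: %s\n",
	       (funcs & I2C_FUNC_PROTOCOL_MANGLING) ? "yes" : "no");
	close(fd);
	return 0;
}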
 
 ADAPTER IMPLEMENTATION
 ----------------------
index 10518dd588146f6d57c1e3c9c912bb525bc86425..0b3e62d1f77a1853765338afd9ed9d6ea56902d2 100644 (file)
@@ -49,7 +49,9 @@ a byte read, followed by a byte write:
 Modified transactions
 =====================
 
-We have found some I2C devices that needs the following modifications:
+The following modifications to the I2C protocol can also be generated.
+With the exception of I2C_M_NOSTART, these are usually only needed to
+work around device issues:
 
   Flag I2C_M_NOSTART: 
     In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
@@ -60,6 +62,11 @@ We have found some I2C devices that needs the following modifications:
     we do not generate Addr, but we do generate the startbit S. This will
     probably confuse all other clients on your bus, so don't try this.
 
+    This is often used to gather transmits from multiple data buffers in
+    system memory into something that appears as a single transfer to the
+    I2C device but may also be used between direction changes by some
+    rare devices.
+
   Flags I2C_M_REV_DIR_ADDR
     This toggles the Rd/Wr flag. That is, if you want to do a write, but
     need to emit an Rd instead of a Wr, or vice versa, you set this
index 506c7390c2b90e37b0df7f1b8e93dc7ff2b540fd..13f1aa09b938c09c91421ad55dc168ef67cd8347 100644 (file)
@@ -86,7 +86,7 @@ There is also a gitweb interface available at
 http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
+http://horms.net/projects/kexec/
 
 3) Unpack the tarball with the tar command, as follows:
 
index c45513d806abc48d3e11fa756e4f11138fb179ce..a92c5ebf373e2bf4bea68072b58fbc0471ad9c13 100644 (file)
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        sched_debug     [KNL] Enables verbose scheduler debug messages.
 
+       skew_tick=      [KNL] Offset the periodic timer tick per cpu to mitigate
+                       xtime_lock contention on larger systems, and/or RCU lock
+                       contention on all systems with CONFIG_MAXSMP set.
+                       Format: { "0" | "1" }
+                       0 -- disable. (may be set to 1 via CONFIG_CMDLINE="skew_tick=1")
+                       1 -- enable.
+                       Note: increases power consumption, thus should only be
+                       enabled if running jitter-sensitive (HPC/RT) workloads.
+
        security=       [SECURITY] Choose a security module to enable at boot.
                        If this boot parameter is not specified, only the first
                        security module asking for security registration will be
index ab1e8d7004c5238f9d4b30ec0137fd5f3e298226..5cb9a1972460fdcd2909f3a9fd06dd4c265a2921 100644 (file)
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
 FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing this driver.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
+MAC 10/100 Universal version 4.0 have been used for developing this driver.
 
 This driver supports both the platform bus and PCI.
 
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
 When one or more packets are received, an interrupt happens. The interrupts
 are not queued so the driver has to scan all the descriptors in the ring during
 the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work to be
-done, and it exits.
+This is based on NAPI so the interrupt handler signals only if there is work
+to be done, and it exits.
 Then the poll method will be scheduled at some future point.
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
 4.3) Timer-Driver Interrupt
-Instead of having the device that asynchronously notifies the frame receptions, the
-driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the device
-will experience different levels of latency. Some NICs have dedicated timer
-device to perform this task. STMMAC can use either the RTC device or the TMU
-channel 2  on STLinux platforms.
+Instead of having the device that asynchronously notifies the frame receptions,
+the driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the
+device will experience different levels of latency. Some NICs have dedicated
+timer device to perform this task. STMMAC can use either the RTC device or the
+TMU channel 2  on STLinux platforms.
 The timers frequency can be passed to the driver as parameter; when change it,
 take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to spare
-the CPU while having the maximum throughput.
+Several performance tests on STM platforms showed this optimisation allows to
+spare the CPU while having the maximum throughput.
 
 4.4) WOL
-Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
-core.
+Wake up on Lan feature through Magic and Unicast frames are supported for the
+GMAC core.
 
 4.5) DMA descriptors
 Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
 These are included in the include/linux/stmmac.h header file
 and detailed below as well:
 
- struct plat_stmmacenet_data {
+struct plat_stmmacenet_data {
+       char *phy_bus_name;
        int bus_id;
        int phy_addr;
        int interface;
@@ -124,19 +125,24 @@ and detailed below as well:
        void (*bus_setup)(void __iomem *ioaddr);
        int (*init)(struct platform_device *pdev);
        void (*exit)(struct platform_device *pdev);
+       void *custom_cfg;
+       void *custom_data;
        void *bsp_priv;
  };
 
 Where:
+ o phy_bus_name: phy bus name to attach to the stmmac.
  o bus_id: bus identifier.
  o phy_addr: the physical address can be passed from the platform.
            If it is set to -1 the driver will automatically
            detect it at run-time by probing all the 32 addresses.
  o interface: PHY device's interface.
  o mdio_bus_data: specific platform fields for the MDIO bus.
- o pbl: the Programmable Burst Length is maximum number of beats to
+ o dma_cfg: internal DMA parameters
+   o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
+   o fixed_burst/mixed_burst/burst_len
  o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@ Where:
             this is sometime necessary on some platforms (e.g. ST boxes)
             where the HW needs to have set some PIO lines or system cfg
             registers.
- o custom_cfg: this is a custom configuration that can be passed while
-             initialising the resources.
+ o custom_cfg/custom_data: this is a custom configuration that can be passed
+                          while initialising the resources.
+ o bsp_priv: another private pointer.
 
 For the MDIO bus we have:
 
@@ -180,7 +187,6 @@ Where:
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
-
 For DMA engine we have the following internal fields that should be
 tuned according to the HW capabilities.
 
index fdcca991df3067100330da55136d25b240fde3f5..b4f7f4b23f648e3e129bf3b8d31b68e17e004ee2 100644 (file)
@@ -44,6 +44,16 @@ Charger Manager supports the following:
        Normally, the platform will need to resume and suspend some devices
        that are used by Charger Manager.
 
+* Support for premature full-battery event handling
+       If the battery voltage drops by "fullbatt_vchkdrop_uV" after
+       "fullbatt_vchkdrop_ms" from the full-battery event, the framework
+       restarts charging. This check is also performed while suspended by
+       setting wakeup time accordingly and using suspend_again.
+
+* Support for uevent-notify
+       On charger-related events, the device sends
+       notifications to users via UEVENT.
+
 2. Global Charger-Manager Data related with suspend_again
 ========================================================
 In order to setup Charger Manager with suspend-again feature
@@ -55,7 +65,7 @@ if there are multiple batteries. If there are multiple batteries, the
 multiple instances of Charger Manager share the same charger_global_desc
 and it will manage in-suspend monitoring for all instances of Charger Manager.
 
-The user needs to provide all the two entries properly in order to activate
+The user needs to provide all three entries properly in order to activate
 in-suspend monitoring:
 
 struct charger_global_desc {
@@ -74,6 +84,11 @@ bool (*rtc_only_wakeup)(void);
        same struct. If there is any other wakeup source triggered the
        wakeup, it should return false. If the "rtc" is the only wakeup
        reason, it should return true.
+
+bool assume_timer_stops_in_suspend;
+       : if true, Charger Manager assumes that
+       the timer (CM uses jiffies as the timer) stops during suspend. Then, CM
+       assumes that the suspend duration is the same as the alarm length.
 };
 
 3. How to setup suspend_again
@@ -111,6 +126,16 @@ enum polling_modes polling_mode;
          CM_POLL_CHARGING_ONLY: poll this battery if and only if the
                                 battery is being charged.
 
+unsigned int fullbatt_vchkdrop_ms;
+unsigned int fullbatt_vchkdrop_uV;
+       : If both have non-zero values, Charger Manager will check the
+       battery voltage drop fullbatt_vchkdrop_ms after the battery is fully
+       charged. If the voltage drop is over fullbatt_vchkdrop_uV, Charger
+       Manager will try to recharge the battery by disabling and enabling
+       chargers. Recharging on the voltage-drop condition alone (without the
+       delay condition) needs to be implemented with hardware interrupts from
+       fuel gauges or charger devices/chips.
+
 unsigned int fullbatt_uV;
        : If specified with a non-zero value, Charger Manager assumes
        that the battery is full (capacity = 100) if the battery is not being
@@ -122,6 +147,8 @@ unsigned int polling_interval_ms;
        this battery every polling_interval_ms or more frequently.
 
 enum data_source battery_present;
+       : CM_BATTERY_PRESENT: assume that the battery exists.
+       CM_NO_BATTERY: assume that the battery does not exist.
        CM_FUEL_GAUGE: get battery presence information from fuel gauge.
        CM_CHARGER_STAT: get battery presence from chargers.
 
@@ -151,7 +178,17 @@ bool measure_battery_temp;
        the value of measure_battery_temp.
 };
 
-5. Other Considerations
+5. Notify Charger-Manager of charger events: cm_notify_event()
+=========================================================
+If a charger event needs to be reported to
+Charger Manager, the charger device driver that triggers the event can call
+cm_notify_event(psy, type, msg) to notify the corresponding Charger Manager.
+In the function, psy is the charger driver's power_supply pointer, which is
+associated with Charger-Manager. The parameter "type"
+is the same as irq's type (enum cm_event_types). The event message "msg" is
+optional and is effective only if the event type is "UNDESCRIBED" or "OTHERS".
+
+6. Other Considerations
 =======================
 
 At the charger/battery-related events such as battery-pulled-out,
index 9f16c5178b662b8f9ec67f3dd7eafd6f4c89e39a..211831d4095fef63eacb13a2dbde8d647b5499a6 100644 (file)
@@ -84,6 +84,8 @@ are already charged or discharging, 'n/a' can be displayed (or
 HEALTH - represents health of the battery, values corresponds to
 POWER_SUPPLY_HEALTH_*, defined in battery.h.
 
+VOLTAGE_OCV - open circuit voltage of the battery.
+
 VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and
 minimal power supply voltages. Maximal/minimal means values of voltages
 when battery considered "full"/"empty" at normal conditions. Yes, there is
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..f7be84f
--- /dev/null
@@ -0,0 +1,57 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
+
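For illustration, a small user-space program that sets the bit and then execs a placeholder binary could look like this (the fallback define is only needed with headers older than 3.5; /bin/true is just an example target):

/* nnp.c -- set no_new_privs, then exec */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38	/* value used by the 3.5 headers */
#endif

int main(void)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("prctl(PR_SET_NO_NEW_PRIVS)");
		return 1;
	}
	/* From here on, execve() cannot grant new privileges: setuid/setgid
	 * bits and file capabilities on the target binary are ignored. */
	execl("/bin/true", "true", (char *)NULL);
	perror("execl");
	return 1;
}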
+Be careful, though: LSMs might also not tighten constraints on exec
+in no_new_privs mode.  (This means that setting up a general-purpose
+service launcher to set no_new_privs before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
index f0ab5cf28fcae0a1ff0783bf8a4ddc477447f585..4a7b54bd37e8c18b2eebfcd21608a2488589641b 100644 (file)
@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression, they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.
index 88fd7f5c8dcd61307171b3af852541d06a984380..13d6166d7a2798fbd54b39a90b533ad5ddebe9eb 100644 (file)
@@ -225,6 +225,13 @@ a queue must be less or equal then msg_max.
 maximum  message size value (it is every  message queue's attribute set during
 its creation).
 
+/proc/sys/fs/mqueue/msg_default is a read/write file for setting/getting the
+default number of messages in a queue, used when the attr parameter of
+mq_open(2) is NULL. If it exceeds msg_max, the default is initialized to msg_max.
+
+/proc/sys/fs/mqueue/msgsize_default is a read/write file for setting/getting
+the default message size, used when the attr parameter of mq_open(2) is NULL.
+If it exceeds msgsize_max, the default is initialized to msgsize_max.
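For illustration, these defaults can be observed from user space by opening a queue with a NULL attr pointer ("/mq_demo" is a placeholder name; link with -lrt):

/* mq_defaults.c -- create a queue with NULL attr and show the defaults */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr;
	mqd_t q = mq_open("/mq_demo", O_CREAT | O_RDWR, 0600, NULL);

	if (q == (mqd_t)-1 || mq_getattr(q, &attr)) {
		perror("mq_open/mq_getattr");
		return 1;
	}
	/* With a NULL attr, these come from msg_default and msgsize_default. */
	printf("mq_maxmsg:  %ld\n", attr.mq_maxmsg);
	printf("mq_msgsize: %ld\n", attr.mq_msgsize);
	mq_close(q);
	mq_unlink("/mq_demo");
	return 0;
}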
 
 4. /proc/sys/fs/epoll - Configuration options for the epoll interface
 --------------------------------------------------------
index 930126698a0f5b0f6e2c458b53604d581b201063..2c9948379469c1dba5f8c7ef711567067727ff08 100644 (file)
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin.  The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
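An illustrative user-space sketch of the call sequence (error handling kept minimal; GSI 5 is an arbitrary example, and an in-kernel irqchip is created first since KVM_IRQFD requires one on x86):

/* irqfd.c -- attach an eventfd to a guest GSI with KVM_IRQFD */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_irqfd irqfd;
	int kvm = open("/dev/kvm", O_RDWR);
	int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
	int efd = eventfd(0, 0);

	if (kvm < 0 || vm < 0 || efd < 0) {
		perror("setup");
		return 1;
	}
	if (ioctl(vm, KVM_CREATE_IRQCHIP, 0) < 0) {
		perror("KVM_CREATE_IRQCHIP");
		return 1;
	}

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd  = efd;
	irqfd.gsi = 5;			/* arbitrary example pin */
	if (ioctl(vm, KVM_IRQFD, &irqfd) < 0) {
		perror("KVM_IRQFD");
		return 1;
	}

	/* Every write to efd now injects an interrupt on GSI 5.  To remove
	 * the binding, set irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN and issue
	 * KVM_IRQFD again with the same fd and gsi. */
	return 0;
}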
+
 5. The kvm_run structure
 ------------------------
 
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644 (file)
index 0000000..37067cf
--- /dev/null
@@ -0,0 +1,278 @@
+Frontswap provides a "transcendent memory" interface for swap pages.
+In some environments, dramatic performance savings may be obtained because
+swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
+
+(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
+and the only necessary changes to the core kernel for transcendent memory;
+all other supporting code -- the "backends" -- is implemented as drivers.
+See the LWN.net article "Transcendent memory in a nutshell" for a detailed
+overview of frontswap and related kernel parts:
+https://lwn.net/Articles/454795/ )
+
+Frontswap is so named because it can be thought of as the opposite of
+a "backing" store for a swap device.  The storage is assumed to be
+a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
+to the requirements of transcendent memory (such as Xen's "tmem", or
+in-kernel compressed memory, aka "zcache", or future RAM-like devices);
+this pseudo-RAM device is not directly accessible or addressable by the
+kernel and is of unknown and possibly time-varying size.  The driver
+links itself to frontswap by calling frontswap_register_ops to set the
+frontswap_ops funcs appropriately and the functions it provides must
+conform to certain policies as follows:
+
+An "init" prepares the device to receive frontswap pages associated
+with the specified swap device number (aka "type").  A "store" will
+copy the page to transcendent memory and associate it with the type and
+offset associated with the page. A "load" will copy the page, if found,
+from transcendent memory into kernel memory, but will NOT remove the page
+from transcendent memory.  An "invalidate_page" will remove the page
+from transcendent memory and an "invalidate_area" will remove ALL pages
+associated with the swap type (e.g., like swapoff) and notify the "device"
+to refuse further stores with that swap type.
+
+Once a page is successfully stored, a matching load on the page will normally
+succeed.  So when the kernel finds itself in a situation where it needs
+to swap out a page, it first attempts to use frontswap.  If the store returns
+success, the data has been successfully saved to transcendent memory and
+a disk write and, if the data is later read back, a disk read are avoided.
+If a store returns failure, transcendent memory has rejected the data, and the
+page can be written to swap as usual.
+
+If a backend chooses, frontswap can be configured as a "writethrough
+cache" by calling frontswap_writethrough().  In this mode, the reduction
+in swap device writes is lost (and also a non-trivial performance advantage)
+in order to allow the backend to arbitrarily "reclaim" space used to
+store frontswap pages to more completely manage its memory usage.
+
+Note that if a page is stored and the page already exists in transcendent memory
+(a "duplicate" store), either the store succeeds and the data is overwritten,
+or the store fails AND the page is invalidated.  This ensures stale data may
+never be obtained from frontswap.
+
+If properly configured, monitoring of frontswap is done via debugfs in
+the /sys/kernel/debug/frontswap directory.  The effectiveness of
+frontswap can be measured (across all swap devices) with:
+
+failed_stores  - how many store attempts have failed
+loads          - how many loads were attempted (all should succeed)
+succ_stores    - how many store attempts have succeeded
+invalidates    - how many invalidates were attempted
+
+A backend implementation may provide additional metrics.
+
+FAQ
+
+1) Where's the value?
+
+When a workload starts swapping, performance falls through the floor.
+Frontswap significantly increases performance in many such workloads by
+providing a clean, dynamic interface to read and write swap pages to
+"transcendent memory" that is otherwise not directly addressable by the kernel.
+This interface is ideal when data is transformed to a different form
+and size (such as with compression) or secretly moved (as might be
+useful for write-balancing for some RAM-like devices).  Swap pages (and
+evicted page-cache pages) are a great use for this kind of slower-than-RAM-
+but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
+cleancache) interface to transcendent memory provides a nice way to read
+and write -- and indirectly "name" -- the pages.
+
+Frontswap -- and cleancache -- with a fairly small impact on the kernel,
+provides a huge amount of flexibility for more dynamic, flexible RAM
+utilization in various system configurations:
+
+In the single kernel case, aka "zcache", pages are compressed and
+stored in local memory, thus increasing the total anonymous pages
+that can be safely kept in RAM.  Zcache essentially trades off CPU
+cycles used in compression/decompression for better memory utilization.
+Benchmarks have shown little or no impact when memory pressure is
+low while providing a significant performance improvement (25%+)
+on some workloads under high memory pressure.
+
+"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
+support for clustered systems.  Frontswap pages are locally compressed
+as in zcache, but then "remotified" to another system's RAM.  This
+allows RAM to be dynamically load-balanced back-and-forth as needed,
+i.e. when system A is overcommitted, it can swap to system B, and
+vice versa.  RAMster can also be configured as a memory server so
+many servers in a cluster can swap, dynamically as needed, to a single
+server configured with a large amount of RAM... without pre-configuring
+how much of the RAM is available for each of the clients!
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines.  This is really hard to do with RAM and efforts to do
+it well with no kernel changes have essentially failed (except in some
+well-publicized special-case workloads).
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization.  And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "selfballooning"), sudden unexpected
+memory pressure may result in swapping; frontswap allows those pages
+to be swapped to and from hypervisor RAM (if overall host system memory
+conditions allow), thus mitigating the potentially awful performance impact
+of unplanned swapping.
+
+A KVM implementation is underway and has been RFC'ed to lkml.  And,
+using frontswap, investigation is also underway on the use of NVM as
+a memory extension technology.
+
+2) Sure there may be performance advantages in some situations, but
+   what's the space/time overhead of frontswap?
+
+If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
+nothingness and the only overhead is a few extra bytes per swapon'ed
+swap device.  If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
+registers, there is one extra global variable that is compared against zero
+for every swap page read or written.  If CONFIG_FRONTSWAP is enabled
+AND a frontswap backend registers AND the backend fails every "store"
+request (i.e. provides no memory despite claiming it might),
+CPU overhead is still negligible -- and since every frontswap fail
+precedes a swap page write-to-disk, the system is highly likely
+to be I/O bound and using a small fraction of a percent of a CPU
+will be irrelevant anyway.
+
+As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
+registers, one bit is allocated for every swap page for every swap
+device that is swapon'd.  This is added to the EIGHT bits (which
+was sixteen until about 2.6.34) that the kernel already allocates
+for every swap page for every swap device that is swapon'd.  (Hugh
+Dickins has observed that frontswap could probably steal one of
+the existing eight bits, but let's worry about that minor optimization
+later.)  For very large swap disks (which are rare) on a standard
+4K pagesize, this is 1MB per 32GB swap.
+
+When swap pages are stored in transcendent memory instead of written
+out to disk, there is a side effect that this may create more memory
+pressure that can potentially outweigh the other advantages.  A
+backend, such as zcache, must implement policies to carefully (but
+dynamically) manage memory limits to ensure this doesn't happen.
+
+3) OK, how about a quick overview of what this frontswap patch does
+   in terms that a kernel hacker can grok?
+
+Let's assume that a frontswap "backend" has registered during
+kernel initialization; this registration indicates that this
+frontswap backend has access to some "memory" that is not directly
+accessible by the kernel.  Exactly how much memory it provides is
+entirely dynamic and random.
+
+Whenever a swap-device is swapon'd frontswap_init() is called,
+passing the swap device number (aka "type") as a parameter.
+This notifies frontswap to expect attempts to "store" swap pages
+associated with that number.
+
+Whenever the swap subsystem is readying a page to write to a swap
+device (cf. swap_writepage()), frontswap_store is called.  Frontswap
+consults with the frontswap backend and if the backend says it does NOT
+have room, frontswap_store returns -1 and the kernel swaps the page
+to the swap device as normal.  Note that the response from the frontswap
+backend is unpredictable to the kernel; it may choose to never accept a
+page, it could accept every ninth page, or it might accept every
+page.  But if the backend does accept a page, the data from the page
+has already been copied and associated with the type and offset,
+and the backend guarantees the persistence of the data.  In this case,
+frontswap sets a bit in the "frontswap_map" for the swap device
+corresponding to the page offset on the swap device to which it would
+otherwise have written the data.
+
+When the swap subsystem needs to swap-in a page (swap_readpage()),
+it first calls frontswap_load() which checks the frontswap_map to
+see if the page was earlier accepted by the frontswap backend.  If
+it was, the page of data is filled from the frontswap backend and
+the swap-in is complete.  If not, the normal swap-in code is
+executed to obtain the page of data from the real swap device.
+
+So every time the frontswap backend accepts a page, a swap device write
+and (potentially) a later swap device read are replaced by a "frontswap backend
+store" and (possibly) a "frontswap backend load", which are presumably much
+faster.
+
+4) Can't frontswap be configured as a "special" swap device that is
+   just higher priority than any real swap device (e.g. like zswap,
+   or maybe swap-over-nbd/NFS)?
+
+No.  First, the existing swap subsystem doesn't allow for any kind of
+swap hierarchy.  Perhaps it could be rewritten to accommodate a hierarchy,
+but this would require fairly drastic changes.  Even if it were
+rewritten, the existing swap subsystem uses the block I/O layer which
+assumes a swap device is fixed size and any page in it is linearly
+addressable.  Frontswap barely touches the existing swap subsystem,
+and works around the constraints of the block I/O subsystem to provide
+a great deal of flexibility and dynamicity.
+
+For example, the acceptance of any swap page by the frontswap backend is
+entirely unpredictable. This is critical to the definition of frontswap
+backends because it grants completely dynamic discretion to the
+backend.  In zcache, one cannot know a priori how compressible a page is.
+"Poorly" compressible pages can be rejected, and "poorly" can itself be
+defined dynamically depending on current memory constraints.
+
+Further, frontswap is entirely synchronous whereas a real swap
+device is, by definition, asynchronous and uses block I/O.  The
+block I/O layer is not only unnecessary, but may perform "optimizations"
+that are inappropriate for a RAM-oriented device including delaying
+the write of some pages for a significant amount of time.  Synchrony is
+required to ensure the dynamicity of the backend and to avoid thorny race
+conditions that would unnecessarily and greatly complicate frontswap
+and/or the block I/O subsystem.  That said, only the initial "store"
+and "load" operations need be synchronous.  A separate asynchronous thread
+is free to manipulate the pages stored by frontswap.  For example,
+the "remotification" thread in RAMster uses standard asynchronous
+kernel sockets to move compressed frontswap pages to a remote machine.
+Similarly, a KVM guest-side implementation could do in-guest compression
+and use "batched" hypercalls.
+
+In a virtualized environment, the dynamicity allows the hypervisor
+(or host OS) to do "intelligent overcommit".  For example, it can
+choose to accept pages only until host-swapping might be imminent,
+then force guests to do their own swapping.
+
+There is a downside to the transcendent memory specifications for
+frontswap:  Since any "store" might fail, there must always be a real
+slot on a real swap device to swap the page.  Thus frontswap must be
+implemented as a "shadow" to every swapon'd device with the potential
+capability of holding every page that the swap device might have held
+and the possibility that it might hold no pages at all.  This means
+that frontswap cannot contain more pages than the total of swapon'd
+swap devices.  For example, if NO swap device is configured on some
+installation, frontswap is useless.  Swapless portable devices
+can still use frontswap but a backend for such devices must configure
+some kind of "ghost" swap device and ensure that it is never used.
+
+5) Why this weird definition about "duplicate stores"?  If a page
+   has been previously successfully stored, can't it always be
+   successfully overwritten?
+
+Nearly always it can, but no, sometimes it cannot.  Consider an example
+where data is compressed and the original 4K page has been compressed
+to 1K.  Now an attempt is made to overwrite the page with data that
+is non-compressible and so would take the entire 4K.  But the backend
+has no more space.  In this case, the store must be rejected.  Whenever
+frontswap rejects a store that would overwrite, it also must invalidate
+the old data and ensure that it is no longer accessible.  Since the
+swap subsystem then writes the new data to the real swap device,
+this is the correct course of action to ensure coherency.
+
+6) What is frontswap_shrink for?
+
+When the (non-frontswap) swap subsystem swaps out a page to a real
+swap device, that page is only taking up low-value pre-allocated disk
+space.  But if frontswap has placed a page in transcendent memory, that
+page may be taking up valuable real estate.  The frontswap_shrink
+routine allows code outside of the swap subsystem to force pages out
+of the memory managed by frontswap and back into kernel-addressable memory.
+For example, in RAMster, a "suction driver" thread will attempt
+to "repatriate" pages sent to a remote machine back to the local machine;
+this is driven using the frontswap_shrink mechanism when memory pressure
+subsides.
+
+7) Why does the frontswap patch create the new include file swapfile.h?
+
+The frontswap code depends on some swap-subsystem-internal data
+structures that have, over the years, moved back and forth between
+static and global.  This seemed a reasonable compromise:  Define
+them as global but declare them in a new include file that isn't
+included by the large number of source files that include swap.h.
+
+Dan Magenheimer, last updated April 9, 2012
index 4600cbe3d6beabc7e9fb77c147951bcda70e6b46..7587493c67f11e809861b0e592c5d8458ab75030 100644 (file)
@@ -16,7 +16,7 @@ There are three components to pagemap:
     * Bits 0-4   swap type if swapped
     * Bits 5-54  swap offset if swapped
     * Bits 55-60 page shift (page size = 1<<page shift)
-    * Bit  61    reserved for future use
+    * Bit  61    page is file-page or shared-anon
     * Bit  62    page swapped
     * Bit  63    page present
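For illustration, these bits can be decoded from user space by indexing /proc/self/pagemap with the virtual page number of any mapped address:

/* pagemap.c -- decode the pagemap entry for one address of this process */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t entry = 0;
	long psize = sysconf(_SC_PAGESIZE);
	int some_data = 42;			/* any mapped address will do */
	uintptr_t vaddr = (uintptr_t)&some_data;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0 ||
	    pread(fd, &entry, sizeof(entry), (vaddr / psize) * 8) != sizeof(entry)) {
		perror("pagemap");
		return 1;
	}
	printf("present:               %d\n", (int)((entry >> 63) & 1));
	printf("swapped:               %d\n", (int)((entry >> 62) & 1));
	printf("file-page/shared-anon: %d\n", (int)((entry >> 61) & 1));
	close(fd);
	return 0;
}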
 
index 6752870c4970d73721c48a041b15aa36af929ab5..b0c6d1bbb43444096fe31b4281d60456febfb704 100644 (file)
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
index 25fe4304f2fcb7b21b4065de12cfa5af4ad9f00b..086638f6c82d2e37d92e2aefa94243c5b1b74b13 100644 (file)
@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 16-Mar-2012
+Last reviewed: 22-May-2012
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -39,6 +39,10 @@ watchdog_device structure.
 The watchdog device structure looks like this:
 
 struct watchdog_device {
+       int id;
+       struct cdev cdev;
+       struct device *dev;
+       struct device *parent;
        const struct watchdog_info *info;
        const struct watchdog_ops *ops;
        unsigned int bootstatus;
@@ -46,10 +50,20 @@ struct watchdog_device {
        unsigned int min_timeout;
        unsigned int max_timeout;
        void *driver_data;
+       struct mutex lock;
        unsigned long status;
 };
 
 It contains following fields:
+* id: set by watchdog_register_device; id 0 is special. It has both a
+  /dev/watchdog0 cdev (dynamic major, minor 0) and the old
+  /dev/watchdog miscdev. The id is set automatically when calling
+  watchdog_register_device.
+* cdev: cdev for the dynamic /dev/watchdog<id> device nodes. This
+  field is also populated by watchdog_register_device.
+* dev: device under the watchdog class (created by watchdog_register_device).
+* parent: set this to the parent device (or NULL) before calling
+  watchdog_register_device.
 * info: a pointer to a watchdog_info structure. This structure gives some
  additional information about the watchdog timer itself. (Like its unique name)
 * ops: a pointer to the list of watchdog operations that the watchdog supports.
@@ -61,6 +75,7 @@ It contains following fields:
 * driver_data: a pointer to the drivers private data of a watchdog device.
   This data should only be accessed via the watchdog_set_drvdata and
   watchdog_get_drvdata routines.
+* lock: Mutex for WatchDog Timer Driver Core internal use only.
 * status: this field contains a number of status bits that give extra
   information about the status of the device (Like: is the watchdog timer
   running/active, is the nowayout bit set, is the device opened via
@@ -78,6 +93,8 @@ struct watchdog_ops {
        unsigned int (*status)(struct watchdog_device *);
        int (*set_timeout)(struct watchdog_device *, unsigned int);
        unsigned int (*get_timeleft)(struct watchdog_device *);
+       void (*ref)(struct watchdog_device *);
+       void (*unref)(struct watchdog_device *);
        long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
@@ -85,6 +102,21 @@ It is important that you first define the module owner of the watchdog timer
 driver's operations. This module owner will be used to lock the module when
 the watchdog is active. (This to avoid a system crash when you unload the
 module and /dev/watchdog is still open).
+
+If the watchdog_device struct is dynamically allocated, just locking the module
+is not enough and a driver also needs to define the ref and unref operations to
+ensure the structure holding the watchdog_device does not go away.
+
+The simplest (and usually sufficient) implementation of this is to:
+1) Add a kref struct to the same structure which is holding the watchdog_device
+2) Define a release callback for the kref which frees the struct holding both
+3) Call kref_init on this kref *before* calling watchdog_register_device()
+4) Define a ref operation calling kref_get on this kref
+5) Define a unref operation calling kref_put on this kref
+6) When it is time to cleanup:
+ * Do not kfree() the struct holding both, the last kref_put will do this!
+ * *After* calling watchdog_unregister_device() call kref_put on the kref
+
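A sketch of that recipe with hypothetical "mywdt" names (kernel-side code, shown only to illustrate steps 1-6; trivial start/stop ops are included so the ops table is complete):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

struct mywdt {
	struct kref kref;			/* step 1: kref next to the wdd */
	struct watchdog_device wdd;
};

static void mywdt_release(struct kref *kref)	/* step 2: frees both at once */
{
	kfree(container_of(kref, struct mywdt, kref));
}

static void mywdt_ref(struct watchdog_device *wdd)	/* step 4 */
{
	kref_get(&container_of(wdd, struct mywdt, wdd)->kref);
}

static void mywdt_unref(struct watchdog_device *wdd)	/* step 5 */
{
	kref_put(&container_of(wdd, struct mywdt, wdd)->kref, mywdt_release);
}

static int mywdt_start(struct watchdog_device *wdd) { return 0; }
static int mywdt_stop(struct watchdog_device *wdd)  { return 0; }

static const struct watchdog_ops mywdt_ops = {
	.owner	= THIS_MODULE,
	.start	= mywdt_start,
	.stop	= mywdt_stop,
	.ref	= mywdt_ref,
	.unref	= mywdt_unref,
};

/* On probe: kzalloc the struct, kref_init(&mywdt->kref) (step 3), fill in
 * mywdt->wdd.info/ops, then watchdog_register_device(&mywdt->wdd).
 * On remove: watchdog_unregister_device(&mywdt->wdd), then
 * kref_put(&mywdt->kref, mywdt_release) -- never kfree() directly (step 6). */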
 Some operations are mandatory and some are optional. The mandatory operations
 are:
 * start: this is a pointer to the routine that starts the watchdog timer
@@ -125,6 +157,10 @@ they are supported. These optional routines/operations are:
   (Note: the WDIOF_SETTIMEOUT needs to be set in the options field of the
   watchdog's info structure).
 * get_timeleft: this routines returns the time that's left before a reset.
+* ref: the operation that calls kref_get on the kref of a dynamically
+  allocated watchdog_device struct.
+* unref: the operation that calls kref_put on the kref of a dynamically
+  allocated watchdog_device struct.
 * ioctl: if this routine is present then it will be called first before we do
   our own internal ioctl call handling. This routine should return -ENOIOCTLCMD
   if a command is not supported. The parameters that are passed to the ioctl
@@ -144,6 +180,11 @@ bit-operations. The status bits that are defined are:
   (This bit should only be used by the WatchDog Timer Driver Core).
 * WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
   If this bit is set then the watchdog timer will not be able to stop.
+* WDOG_UNREGISTERED: this bit gets set by the WatchDog Timer Driver Core
+  after calling watchdog_unregister_device, and then checked before calling
+  any watchdog_ops, so that you can be sure that no operations (other than
+  unref) will get called after unregister, even if userspace still holds a
+  reference to /dev/watchdog.
 
   To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
   timer device) you can either:
index 17ddd822b4563c2cdf80ba80f422db5b4b35e002..04fddbacdbde74a03ae18ea6d91e5c0f452c0b44 100644 (file)
@@ -78,6 +78,11 @@ wd0_timeout: Default watchdog0 timeout in 1/10secs
 wd1_timeout: Default watchdog1 timeout in 1/10secs
 wd2_timeout: Default watchdog2 timeout in 1/10secs
 -------------------------------------------------
+da9052wdt:
+timeout: Watchdog timeout in seconds. 2<= timeout <=131, default=2.048s
+nowayout: Watchdog cannot be stopped once started
+       (default=kernel config parameter)
+-------------------------------------------------
 davinci_wdt:
 heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
 -------------------------------------------------
diff --git a/Documentation/x86/efi-stub.txt b/Documentation/x86/efi-stub.txt
new file mode 100644 (file)
index 0000000..44e6bb6
--- /dev/null
@@ -0,0 +1,65 @@
+                         The EFI Boot Stub
+                    ---------------------------
+
+On the x86 platform, a bzImage can masquerade as a PE/COFF image,
+thereby convincing EFI firmware loaders to load it as an EFI
+executable. The code that modifies the bzImage header, along with the
+EFI-specific entry point that the firmware loader jumps to are
+collectively known as the "EFI boot stub", and live in
+arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
+respectively.
+
+By using the EFI boot stub it's possible to boot a Linux kernel
+without the use of a conventional EFI boot loader, such as grub or
+elilo. Since the EFI boot stub performs the jobs of a boot loader, in
+a certain sense it *IS* the boot loader.
+
+The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option.
+
+
+**** How to install bzImage.efi
+
+The bzImage located in arch/x86/boot/bzImage must be copied to the EFI
+System Partition (ESP) and renamed with the extension ".efi". Without
+the extension the EFI firmware loader will refuse to execute it. It's
+not possible to execute bzImage.efi from the usual Linux file systems
+because EFI firmware doesn't have support for them.
+
+
+**** Passing kernel parameters from the EFI shell
+
+Arguments to the kernel can be passed after bzImage.efi, e.g.
+
+       fs0:> bzImage.efi console=ttyS0 root=/dev/sda4
+
+
+**** The "initrd=" option
+
+Like most boot loaders, the EFI stub allows the user to specify
+multiple initrd files using the "initrd=" option. This is the only EFI
+stub-specific command line parameter; everything else is passed to the
+kernel when it boots.
+
+The path to the initrd file must be an absolute path from the
+beginning of the ESP, relative path names do not work. Also, the path
+is an EFI-style path and directory elements must be separated with
+backslashes (\). For example, given the following directory layout,
+
+fs0:>
+       Kernels\
+                       bzImage.efi
+                       initrd-large.img
+
+       Ramdisks\
+                       initrd-small.img
+                       initrd-medium.img
+
+to boot with the initrd-large.img file if the current working
+directory is fs0:\Kernels, the following command must be used,
+
+       fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img
+
+Notice how bzImage.efi can be specified with a relative path. That's
+because the image we're executing is interpreted by the EFI shell,
+which understands relative paths, whereas the rest of the command line
+is passed to bzImage.efi.
index 64e675d6d478a3a1444b405537b8406d6b7f1b3b..fe643e7b9df6360442ba2bfeb653a971da83b0f4 100644 (file)
@@ -579,7 +579,7 @@ F:  drivers/net/appletalk/
 F:     net/appletalk/
 
 ARASAN COMPACT FLASH PATA CONTROLLER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     include/linux/pata_arasan_cf_data.h
@@ -1077,7 +1077,7 @@ F:        drivers/media/video/s5p-fimc/
 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
 M:     Kyungmin Park <kyungmin.park@samsung.com>
 M:     Kamil Debski <k.debski@samsung.com>
-M:     Jeongtae Park <jtp.park@samsung.com>
+M:     Jeongtae Park <jtp.park@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -1646,11 +1646,11 @@ S:      Maintained
 F:     drivers/gpio/gpio-bt8xx.c
 
 BTRFS FILE SYSTEM
-M:     Chris Mason <chris.mason@oracle.com>
+M:     Chris Mason <chris.mason@fusionio.com>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
 S:     Maintained
 F:     Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
@@ -1743,10 +1743,10 @@ F:      include/linux/can/platform/
 CAPABILITIES
 M:     Serge Hallyn <serge.hallyn@canonical.com>
 L:     linux-security-module@vger.kernel.org
-S:     Supported       
+S:     Supported
 F:     include/linux/capability.h
 F:     security/capability.c
-F:     security/commoncap.c 
+F:     security/commoncap.c
 F:     kernel/capability.c
 
 CELL BROADBAND ENGINE ARCHITECTURE
@@ -1800,6 +1800,9 @@ F:        include/linux/cfag12864b.h
 CFG80211 and NL80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     include/linux/nl80211.h
 F:     include/net/cfg80211.h
@@ -2146,11 +2149,11 @@ S:      Orphan
 F:     drivers/net/wan/pc300*
 
 CYTTSP TOUCHSCREEN DRIVER
-M:      Javier Martinez Canillas <javier@dowhile0.org>
-L:      linux-input@vger.kernel.org
-S:      Maintained
-F:      drivers/input/touchscreen/cyttsp*
-F:      include/linux/input/cyttsp.h
+M:     Javier Martinez Canillas <javier@dowhile0.org>
+L:     linux-input@vger.kernel.org
+S:     Maintained
+F:     drivers/input/touchscreen/cyttsp*
+F:     include/linux/input/cyttsp.h
 
 DAMA SLAVE for AX.25
 M:     Joerg Reuter <jreuter@yaina.de>
@@ -2270,7 +2273,7 @@ F:        include/linux/device-mapper.h
 F:     include/linux/dm-*.h
 
 DIOLAN U2C-12 I2C DRIVER
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-diolan-u2c.c
@@ -2930,6 +2933,13 @@ F:       Documentation/power/freezing-of-tasks.txt
 F:     include/linux/freezer.h
 F:     kernel/freezer.c
 
+FRONTSWAP API
+M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     mm/frontswap.c
+F:     include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
 L:     linux-cachefs@redhat.com
@@ -3138,7 +3148,7 @@ F:        drivers/tty/hvc/
 
 HARDWARE MONITORING
 M:     Jean Delvare <khali@linux-fr.org>
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
@@ -3423,13 +3433,14 @@ S:      Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
+M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M:     Sergey Lapin <slapin@ossfans.org>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S:     Maintained
 F:     net/ieee802154/
+F:     net/mac802154/
 F:     drivers/ieee802154/
 
 IIO SUBSYSTEM AND DRIVERS
@@ -4096,6 +4107,8 @@ F:        drivers/scsi/53c700*
 LED SUBSYSTEM
 M:     Bryan Wu <bryan.wu@canonical.com>
 M:     Richard Purdie <rpurdie@rpsys.net>
+L:     linux-leds@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:     Maintained
 F:     drivers/leds/
 F:     include/linux/leds.h
@@ -4340,7 +4353,8 @@ MAC80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/networking/mac80211-injection.txt
 F:     include/net/mac80211.h
@@ -4351,7 +4365,8 @@ M:        Stefano Brivio <stefano.brivio@polimi.it>
 M:     Mattias Nissler <mattias.nissler@gmx.de>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     net/mac80211/rc80211_pid*
 
@@ -4411,6 +4426,13 @@ S:       Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
+MAX16065 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/max16065
+F:     drivers/hwmon/max16065.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
@@ -4633,8 +4655,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -4836,6 +4858,7 @@ M:        Kevin Hilman <khilman@ti.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
+F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:     Rajendra Nayak <rnayak@ti.com>
@@ -5149,7 +5172,7 @@ F:        drivers/leds/leds-pca9532.c
 F:     include/linux/leds-pca9532.h
 
 PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -5169,7 +5192,7 @@ S:        Maintained
 F:     drivers/firmware/pcdp.*
 
 PCI ERROR RECOVERY
-M:     Linas Vepstas <linasvepstas@gmail.com>
+M:     Linas Vepstas <linasvepstas@gmail.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/pci-error-recovery.txt
@@ -5275,7 +5298,7 @@ S:        Maintained
 F:     drivers/pinctrl/
 
 PIN CONTROLLER - ST SPEAR
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -5299,7 +5322,7 @@ F:        drivers/video/fb-puv3.c
 F:     drivers/rtc/rtc-puv3.c
 
 PMBUS HARDWARE MONITORING DRIVERS
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 W:     http://www.roeck-us.net/linux/drivers/
@@ -5337,7 +5360,7 @@ M:        David Woodhouse <dwmw2@infradead.org>
 T:     git git://git.infradead.org/battery-2.6.git
 S:     Maintained
 F:     include/linux/power_supply.h
-F:     drivers/power/power_supply*
+F:     drivers/power/
 
 PNP SUPPORT
 M:     Adam Belay <abelay@mit.edu>
@@ -5542,7 +5565,7 @@ F:        Documentation/networking/LICENSE.qla3xxx
 F:     drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -5550,7 +5573,6 @@ S:        Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
 M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
@@ -5695,6 +5717,9 @@ F:        include/linux/remoteproc.h
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/rfkill.txt
 F:     net/rfkill/
@@ -5849,7 +5874,7 @@ S:        Maintained
 F:     drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 S:     Maintained
 F:     include/linux/dw_dmac.h
 F:     drivers/dma/dw_dmac_regs.h
@@ -5885,7 +5910,7 @@ M:        Ingo Molnar <mingo@redhat.com>
 M:     Peter Zijlstra <peterz@infradead.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
-F:     kernel/sched*
+F:     kernel/sched/
 F:     include/linux/sched.h
 
 SCORE ARCHITECTURE
@@ -5997,7 +6022,7 @@ S:        Maintained
 F:     drivers/mmc/host/sdhci-s3c.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
@@ -6353,7 +6378,7 @@ S:        Maintained
 F:     include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6362,7 +6387,7 @@ S:        Maintained
 F:     arch/arm/plat-spear/
 
 SPEAR13XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6371,7 +6396,7 @@ S:        Maintained
 F:     arch/arm/mach-spear13xx/
 
 SPEAR3XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6382,7 +6407,7 @@ F:        arch/arm/mach-spear3xx/
 SPEAR6XX MACHINE SUPPORT
 M:     Rajeev Kumar <rajeev-dlh.kumar@st.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6390,7 +6415,7 @@ S:        Maintained
 F:     arch/arm/mach-spear6xx/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6657,7 +6682,7 @@ F:        include/linux/taskstats*
 F:     kernel/taskstats.c
 
 TC CLASSIFIER
-M:     Jamal Hadi Salim <hadi@cyberus.ca>
+M:     Jamal Hadi Salim <jhs@mojatatu.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     include/linux/pkt_cls.h
@@ -7291,11 +7316,11 @@ F:      Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
 F:     include/linux/uio*.h
 
-UTIL-LINUX-NG PACKAGE
+UTIL-LINUX PACKAGE
 M:     Karel Zak <kzak@redhat.com>
-L:     util-linux-ng@vger.kernel.org
-W:     http://kernel.org/~kzak/util-linux-ng/
-T:     git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
+L:     util-linux@vger.kernel.org
+W:     http://en.wikipedia.org/wiki/Util-linux
+T:     git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:     Maintained
 
 UVESAFB DRIVER
@@ -7397,7 +7422,7 @@ F:        include/linux/vlynq.h
 
 VME SUBSYSTEM
 M:     Martyn Welch <martyn.welch@ge.com>
-M:     Manohar Vanga <manohar.vanga@cern.ch>
+M:     Manohar Vanga <manohar.vanga@gmail.com>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     devel@driverdev.osuosl.org
 S:     Maintained
index dda21c3efc7b1495f335eaecb8a0d52613614aa0..4bb09e1b1230d33d9328c2a67ba32aff22ebde7d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 VERSION = 3
-PATCHLEVEL = 4
+PATCHLEVEL = 5
 SUBLEVEL = 0
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
@@ -561,6 +561,8 @@ else
 KBUILD_CFLAGS  += -O2
 endif
 
+include $(srctree)/arch/$(SRCARCH)/Makefile
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -571,8 +573,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \
                  $(call cc-option,-fno-partial-inlining)
 endif
 
-include $(srctree)/arch/$(SRCARCH)/Makefile
-
 ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
index 24779fc95994efb5c4d69e4d507f3cba581570a4..5a8a48320efe9f5c577f9cc8363a4de2a6725559 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned int   __kernel_ino_t;
 #define __kernel_ino_t __kernel_ino_t
 
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long  __kernel_sigset_t;      /* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
index 10ab2d74ecbbede2764c8bf8cbcca14f19f418a0..a8c97d42ec8eaef9215c32a40f743bd94505bc55 100644 (file)
@@ -226,7 +226,6 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
        if (__get_user(set.sig[0], &sc->sc_mask))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(sc, regs, sw))
@@ -261,7 +260,6 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
@@ -468,12 +466,9 @@ static inline void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
              struct pt_regs * regs, struct switch_stack *sw)
 {
-       sigset_t *oldset = &current->blocked;
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-
        if (ka->sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(sig, ka, info, oldset, regs, sw);
        else
@@ -483,12 +478,7 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
                force_sigsegv(sig, current);
                return;
        }
-       block_sigmask(ka, sig);
-       /* A signal was successfully delivered, and the
-          saved sigmask was stored on the signal frame,
-          and will be restored by sigreturn.  So we can
-          simply clear the restore sigmask flag.  */
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static inline void
@@ -572,9 +562,7 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw,
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
-
+       restore_saved_sigmask();
        if (single_stepping)
                ptrace_set_bpt(current);        /* re-set breakpoint */
 }
@@ -590,7 +578,5 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
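
For reference, the alpha signal.c hunks above replace open-coded TIF_RESTORE_SIGMASK handling with the generic helpers introduced in this cycle: sigmask_to_save() picks the mask to store in the signal frame, restore_saved_sigmask() reinstates a stashed mask when no signal is delivered, and signal_delivered() blocks the handled signal per sa_mask, clears the restore flag and notifies tracers. A rough sketch of the first two, inferred from the open-coded logic removed above rather than copied from the generic implementation:

/* Sketch only: behaviour inferred from the removed alpha code. */
static inline sigset_t *sigmask_to_save(void)
{
	/* use the mask stashed by sigsuspend/pselect if one is pending */
	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		return &current->saved_sigmask;
	return &current->blocked;
}

static inline void restore_saved_sigmask(void)
{
	/* no signal delivered: put the stashed mask back, if any */
	if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
		set_current_blocked(&current->saved_sigmask);
}
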
index 5e7601301b416c29fe3fa0905ee90a9ac4931e94..a91009c6187062253579d0324292ade00ea2241c 100644 (file)
@@ -7,7 +7,6 @@ config ARM
        select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-       select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
@@ -294,6 +293,7 @@ config ARCH_VERSATILE
        select ICST
        select GENERIC_CLOCKEVENTS
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select NEED_MACH_IO_H if PCI
        select PLAT_VERSATILE
        select PLAT_VERSATILE_CLCD
        select PLAT_VERSATILE_FPGA_IRQ
@@ -525,7 +525,7 @@ config ARCH_IXP4XX
        select ARCH_HAS_DMA_SET_COHERENT_MASK
        select CLKSRC_MMIO
        select CPU_XSCALE
-       select GENERIC_GPIO
+       select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
        select MIGHT_HAVE_PCI
        select NEED_MACH_IO_H
@@ -589,6 +589,7 @@ config ARCH_ORION5X
        select PCI
        select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
+       select NEED_MACH_IO_H
        select PLAT_ORION
        help
          Support for the following Marvell Orion 5x series SoCs:
index 881bc398784483169535a156158a4b2598beb23a..4ad5160018cb522922201b3dfebf9c0462b6ef85 100644 (file)
@@ -58,6 +58,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8012e000 0x80>;
                        interrupts = <0 119 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -69,6 +71,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8012e080 0x80>;
                        interrupts = <0 120 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -80,6 +84,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8000e000 0x80>;
                        interrupts = <0 121 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -91,6 +97,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8000e080 0x80>;
                        interrupts = <0 122 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8000e100 0x80>;
                        interrupts = <0 123 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8000e180 0x80>;
                        interrupts = <0 124 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8011e000 0x80>;
                        interrupts = <0 125 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8011e080 0x80>;
                        interrupts = <0 126 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0xa03fe000 0x80>;
                        interrupts = <0 127 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                        gpio-bank = <8>;
                };
 
+               pinctrl {
+                       compatible = "stericsson,nmk_pinctrl";
+               };
+
                usb@a03e0000 {
                        compatible = "stericsson,db8500-musb",
                                "mentor,musb";
                prcmu@80157000 {
                        compatible = "stericsson,db8500-prcmu";
                        reg = <0x80157000 0x1000>;
-                       interrupts = <46 47>;
+                       interrupts = <0 47 0x4>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
-                               prcmu-timer-4@80157450 {
+                       prcmu-timer-4@80157450 {
                                compatible = "stericsson,db8500-prcmu-timer-4";
                                reg = <0x80157450 0xC>;
                        };
 
+                       db8500-prcmu-regulators {
+                               compatible = "stericsson,db8500-prcmu-regulator";
+
+                               // DB8500_REGULATOR_VAPE
+                               db8500_vape_reg: db8500_vape {
+                                       regulator-name = "db8500-vape";
+                                       regulator-always-on;
+                               };
+
+                               // DB8500_REGULATOR_VARM
+                               db8500_varm_reg: db8500_varm {
+                                       regulator-name = "db8500-varm";
+                               };
+
+                               // DB8500_REGULATOR_VMODEM
+                               db8500_vmodem_reg: db8500_vmodem {
+                                       regulator-name = "db8500-vmodem";
+                               };
+
+                               // DB8500_REGULATOR_VPLL
+                               db8500_vpll_reg: db8500_vpll {
+                                       regulator-name = "db8500-vpll";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS1
+                               db8500_vsmps1_reg: db8500_vsmps1 {
+                                       regulator-name = "db8500-vsmps1";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS2
+                               db8500_vsmps2_reg: db8500_vsmps2 {
+                                       regulator-name = "db8500-vsmps2";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS3
+                               db8500_vsmps3_reg: db8500_vsmps3 {
+                                       regulator-name = "db8500-vsmps3";
+                               };
+
+                               // DB8500_REGULATOR_VRF1
+                               db8500_vrf1_reg: db8500_vrf1 {
+                                       regulator-name = "db8500-vrf1";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAMMDSP
+                               db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+                                       regulator-name = "db8500-sva-mmdsp";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAMMDSPRET
+                               db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+                                       regulator-name = "db8500-sva-mmdsp-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAPIPE
+                               db8500_sva_pipe_reg: db8500_sva_pipe {
+                                       regulator-name = "db8500_sva_pipe";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAMMDSP
+                               db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+                                       regulator-name = "db8500_sia_mmdsp";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAMMDSPRET
+                               db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+                                       regulator-name = "db8500-sia-mmdsp-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAPIPE
+                               db8500_sia_pipe_reg: db8500_sia_pipe {
+                                       regulator-name = "db8500-sia-pipe";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SGA
+                               db8500_sga_reg: db8500_sga {
+                                       regulator-name = "db8500-sga";
+                                       vin-supply = <&db8500_vape_reg>;
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_B2R2_MCDE
+                               db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+                                       regulator-name = "db8500-b2r2-mcde";
+                                       vin-supply = <&db8500_vape_reg>;
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM12
+                               db8500_esram12_reg: db8500_esram12 {
+                                       regulator-name = "db8500-esram12";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM12RET
+                               db8500_esram12_ret_reg: db8500_esram12_ret {
+                                       regulator-name = "db8500-esram12-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM34
+                               db8500_esram34_reg: db8500_esram34 {
+                                       regulator-name = "db8500-esram34";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM34RET
+                               db8500_esram34_ret_reg: db8500_esram34_ret {
+                                       regulator-name = "db8500-esram34-ret";
+                               };
+                       };
+
                        ab8500@5 {
                                compatible = "stericsson,ab8500";
                                reg = <5>; /* mailbox 5 is i2c */
                                interrupts = <0 40 0x4>;
+
+                               ab8500-regulators {
+                                       compatible = "stericsson,ab8500-regulator";
+
+                                       // supplies to the display/camera
+                                       ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+                                               regulator-name = "V-DISPLAY";
+                                               regulator-min-microvolt = <2500000>;
+                                               regulator-max-microvolt = <2900000>;
+                                               regulator-boot-on;
+                                               /* BUG: If turned off MMC will be affected. */
+                                               regulator-always-on;
+                                       };
+
+                                       // supplies to the on-board eMMC
+                                       ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+                                               regulator-name = "V-eMMC1";
+                                               regulator-min-microvolt = <1100000>;
+                                               regulator-max-microvolt = <3300000>;
+                                       };
+
+                                       // supply for VAUX3; SDcard slots
+                                       ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+                                               regulator-name = "V-MMC-SD";
+                                               regulator-min-microvolt = <1100000>;
+                                               regulator-max-microvolt = <3300000>;
+                                       };
+
+                                       // supply for v-intcore12; VINTCORE12 LDO
+                                       ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+                                               regulator-name = "V-INTCORE";
+                                       };
+
+                                       // supply for tvout; gpadc; TVOUT LDO
+                                       ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+                                               regulator-name = "V-TVOUT";
+                                       };
+
+                                       // supply for ab8500-usb; USB LDO
+                                       ab8500_ldo_usb_reg: ab8500_ldo_usb {
+                                               regulator-name = "dummy";
+                                       };
+
+                                       // supply for ab8500-vaudio; VAUDIO LDO
+                                       ab8500_ldo_audio_reg: ab8500_ldo_audio {
+                                               regulator-name = "V-AUD";
+                                       };
+
+                                       // supply for v-anamic1 VAMic1-LDO
+                                       ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+                                               regulator-name = "V-AMIC1";
+                                       };
+
+                                       // supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
+                                       ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+                                               regulator-name = "V-AMIC2";
+                                       };
+
+                                       // supply for v-dmic; VDMIC LDO
+                                       ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+                                               regulator-name = "V-DMIC";
+                                       };
+
+                                       // supply for U8500 CSI/DSI; VANA LDO
+                                       ab8500_ldo_ana_reg: ab8500_ldo_ana {
+                                               regulator-name = "V-CSI/DSI";
+                                       };
+                               };
                        };
                };
 
                        status = "disabled";
 
                        // Add one of these for each child device
-                       cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>;
+                       cs-gpios = <&gpio0 31 0x4 &gpio4 14 0x4 &gpio4 16 0x4
+                                   &gpio6 22 0x4 &gpio7 0 0x4>;
 
                };
 
index 5ca0cdb76413e5e90f03787d1527930bafd81d73..4272b2949228ba2e4ea9d7ebb7bc2285fd9bb19b 100644 (file)
                reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
        };
 
+       combiner:interrupt-controller@10440000 {
+               compatible = "samsung,exynos4210-combiner";
+               #interrupt-cells = <2>;
+               interrupt-controller;
+               samsung,combiner-nr = <32>;
+               reg = <0x10440000 0x1000>;
+               interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+                            <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+                            <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+                            <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+                            <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+                            <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+                            <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+                            <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+       };
+
        watchdog {
                compatible = "samsung,s3c2410-wdt";
                reg = <0x101D0000 0x100>;
index 2b1a166d41f9f850dbaa475bef02de4a70d35c6a..386c769c38d179dcb090bba33af5aeb6ec2f0a0a 100644 (file)
                                status = "disabled";
                        };
                };
+               nand@d8000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       compatible = "fsl,imx27-nand";
+                       reg = <0xd8000000 0x1000>;
+                       interrupts = <29>;
+                       status = "disabled";
+               };
        };
 };
index 2d696866f71c4117973b8055d3ebfbc25b16786d..3f5dad801a9806ad3173a0d06dedc0c4a37e8388 100644 (file)
                        gpio: gpio@40028000 {
                                compatible = "nxp,lpc3220-gpio";
                                reg = <0x40028000 0x1000>;
-                               /* create a private address space for enumeration */
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               gpio_p0: gpio-bank@0 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <0>;
-                               };
-
-                               gpio_p1: gpio-bank@1 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <1>;
-                               };
-
-                               gpio_p2: gpio-bank@2 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <2>;
-                               };
-
-                               gpio_p3: gpio-bank@3 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <3>;
-                               };
-
-                               gpi_p3: gpio-bank@4 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <4>;
-                               };
-
-                               gpo_p3: gpio-bank@5 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <5>;
-                               };
+                               gpio-controller;
+                               #gpio-cells = <3>; /* bank, pin, flags */
                        };
 
                        watchdog@4003C000 {
index 153a4b2d12b58093d8be229587319523bbcf255e..c9b4f27d191e1c61c7099da10910b69d24c34b8d 100644 (file)
@@ -11,7 +11,7 @@
 /include/ "mmp2.dtsi"
 
 / {
-       model = "Marvell MMP2 Aspenite Development Board";
+       model = "Marvell MMP2 Brownstone Development Board";
        compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
 
        chosen {
@@ -19,7 +19,7 @@
        };
 
        memory {
-               reg = <0x00000000 0x04000000>;
+               reg = <0x00000000 0x08000000>;
        };
 
        soc {
index f2ab4ea7cc0ee19c5863a1c57fd0ac9c6142a9cc..581cb081cb0f04586f98e8b10bdf1e095a3b65a0 100644 (file)
@@ -44,6 +44,8 @@
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
                        #interrupt-cells = <1>;
+                       ti,intc-size = <96>;
+                       reg = <0x480FE000 0x1000>;
                };
 
                uart1: serial@4806a000 {
index 0167e86314c011bc8dc6141a4a3f2cbef6d4dd36..c4ff6d1a018bbee575fd99432c518d0ed530e769 100644 (file)
                compatible = "gpio-leds";
 
                led0 {
-                       gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */
+                       gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
                        linux,default-trigger = "heartbeat";
                        default-state = "off";
                };
 
                led1 {
-                       gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */
+                       gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
                        linux,default-trigger = "timer";
                        default-state = "off";
                };
index d99dc04f0d910813ddd2561ce0f818070dfe7935..ec3c339751104c43594062c30a47f2601b02b731 100644 (file)
                reg = <0x00000000 0x20000000>;
        };
 
+       en_3v3_reg: en_3v3 {
+               compatible = "regulator-fixed";
+                regulator-name = "en-3v3-fixed-supply";
+                regulator-min-microvolt = <3300000>;
+                regulator-max-microvolt = <3300000>;
+                gpios = <&gpio0 26  0x4>; // 26
+                startup-delay-us = <5000>;
+                enable-active-high;
+       };
+
        gpio_keys {
                compatible = "gpio-keys";
                #address-cells = <1>;
                        wakeup = <1>;
                        linux,code = <2>;
                        label = "userpb";
-                       gpios = <&gpio1 0 0>;
+                       gpios = <&gpio1 0 0x4>;
                };
                button@2 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <3>;
                        label = "extkb1";
-                       gpios = <&gpio4 23 0>;
+                       gpios = <&gpio4 23 0x4>;
                };
                button@3 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <4>;
                        label = "extkb2";
-                       gpios = <&gpio4 24 0>;
+                       gpios = <&gpio4 24 0x4>;
                };
                button@4 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <5>;
                        label = "extkb3";
-                       gpios = <&gpio5 1 0>;
+                       gpios = <&gpio5 1 0x4>;
                };
                button@5 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <6>;
                        label = "extkb4";
-                       gpios = <&gpio5 2 0>;
+                       gpios = <&gpio5 2 0x4>;
                };
        };
 
                compatible = "gpio-leds";
                used-led {
                        label = "user_led";
-                       gpios = <&gpio4 14>;
+                       gpios = <&gpio4 14 0x4>;
                };
        };
 
        soc-u9500 {
-
                external-bus@50000000 {
                        status = "okay";
 
@@ -80,6 +89,9 @@
                                reg = <0 0x10000>;
                                interrupts = <12 0x1>;
                                interrupt-parent = <&gpio4>;
+                               vdd33a-supply = <&en_3v3_reg>;
+                               vddvario-supply = <&db8500_vape_reg>;
+
 
                                reg-shift = <1>;
                                reg-io-width = <2>;
 
                sdi@80126000 {
                        status = "enabled";
-                       cd-gpios = <&gpio6 26>;
+                       vmmc-supply = <&ab8500_ldo_aux3_reg>;
+                       cd-gpios  = <&gpio6 26 0x4>; // 218
                };
 
                sdi@80114000 {
                        status = "enabled";
+                       vmmc-supply = <&ab8500_ldo_aux2_reg>;
                };
 
                uart@80120000 {
                        tc3589x@42 {
                                //compatible = "tc3589x";
                                reg = <0x42>;
-                               interrupts = <25>;
+                               gpios = <&gpio6 25 0x4>;
                                interrupt-parent = <&gpio6>;
                        };
                        tps61052@33 {
index 8314e4171884bf225d56cb83a9313ab0e9b76340..dd4358bc26e228bd78a1eb35bea71ed1b177a0cf 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 9e61da404d5774f937f6ecebf37e1f80323d5147..419ea7413d232b510bdd532064029520ec4336a5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1310 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 0d8472e5ab9f618b88476f815d241a7ebd51697f..c9a54e06fb6849314d3c4bc25031f7153608d775 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1340 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index a26fc47a55e85485ba1b59b2fc9bc3b70d0a2690..d71fe2a68f098460249e076d54326d7f58fbd665 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1340 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 1f8e1e1481dfb96733f618f915240b08f7097410..f7b84aced654ca643bc49ebe83b376425a6b893f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr13xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -43,8 +43,8 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 8 0x04
-                             0 9 0x04>;
+               interrupts = <0 6 0x04
+                             0 7 0x04>;
        };
 
        L2: l2-cache {
                gmac0: eth@e2000000 {
                        compatible = "st,spear600-gmac";
                        reg = <0xe2000000 0x8000>;
-                       interrupts = <0 23 0x4
-                                     0 24 0x4>;
+                       interrupts = <0 33 0x4
+                                     0 34 0x4>;
                        interrupt-names = "macirq", "eth_wake_irq";
                        status = "disabled";
                };
                        kbd@e0300000 {
                                compatible = "st,spear300-kbd";
                                reg = <0xe0300000 0x1000>;
+                               interrupts = <0 52 0x4>;
                                status = "disabled";
                        };
 
                        serial@e0000000 {
                                compatible = "arm,pl011", "arm,primecell";
                                reg = <0xe0000000 0x1000>;
-                               interrupts = <0 36 0x4>;
+                               interrupts = <0 35 0x4>;
                                status = "disabled";
                        };
 
index fc82b1a264588b34e31a251dd6c886ef754f39e4..d71b8d581e3d39f77ea8d783303b9eb8a2388030 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 01c5e358fdb271b00a409f7c33af0a77d276c6be..ed3627c116ccbd7c5e8f90208fab4c37ef7a97d0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index dc5e2d445a9352e3774b52dfec8ce0cd5fbaf4d9..b00544e0cd5d18e313eaf6807dc4c8a18294548b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index e47081c494d916bf4f21de313b03f2b63964394f..62fc4fb3e5f92f079cd3caaf0ccdf852e1ff01d1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 6308fa3bec1ec65c19ff3a22fda6f4297e2a26ac..e4e912f9502466da3ab44a26eda9f8c83e383d8e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -15,8 +15,8 @@
 /include/ "spear320.dtsi"
 
 / {
-       model = "ST SPEAr300 Evaluation Board";
-       compatible = "st,spear300-evb", "st,spear300";
+       model = "ST SPEAr320 Evaluation Board";
+       compatible = "st,spear320-evb", "st,spear320";
        #address-cells = <1>;
        #size-cells = <1>;
 
@@ -26,7 +26,7 @@
 
        ahb {
                pinmux@b3000000 {
-                       st,pinmux-mode = <3>;
+                       st,pinmux-mode = <4>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&state_default>;
 
index 5372ca399b1f37eba5de326b716c639cd3309be4..1f49d69595a06996b0c8b48d631ed116716f7abb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 91072553963f02566caf2d6a42c727540998a162..3a8bb5736928292b1a78359342550e709e052a2d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 089f0a42c50ef4244765efc59450011637e33a08..a3c36e47d7efcafee29eb73865eb5dec5651d4a4 100644 (file)
                        timer@f0000000 {
                                compatible = "st,spear-timer";
                                reg = <0xf0000000 0x400>;
+                               interrupt-parent = <&vic0>;
                                interrupts = <16>;
                        };
                };
index 941b161ab78ce32c219ec6c88a1ea2f955e3fa77..7e1091d91af8b9d999b795414ebf0110c42b65c0 100644 (file)
                #address-cells = <0>;
                interrupt-controller;
                reg = <0x2c001000 0x1000>,
-                     <0x2c002000 0x100>;
+                     <0x2c002000 0x1000>,
+                     <0x2c004000 0x2000>,
+                     <0x2c006000 0x2000>;
+               interrupts = <1 9 0xf04>;
        };
 
        memory-controller@7ffd0000 {
                             <0 91 4>;
        };
 
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
+       };
+
        pmu {
                compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
                interrupts = <0 68 4>,
index 6905e66d474808f7021121dbb5e065cb7e5d64a0..18917a0f86047a3a444919badb09fb47da244bc4 100644 (file)
 
        timer@2c000600 {
                compatible = "arm,cortex-a5-twd-timer";
-               reg = <0x2c000600 0x38>;
-               interrupts = <1 2 0x304>,
-                            <1 3 0x304>;
+               reg = <0x2c000600 0x20>;
+               interrupts = <1 13 0x304>;
+       };
+
+       watchdog@2c000620 {
+               compatible = "arm,cortex-a5-twd-wdt";
+               reg = <0x2c000620 0x20>;
+               interrupts = <1 14 0x304>;
        };
 
        gic: interrupt-controller@2c001000 {
-               compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+               compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                #address-cells = <0>;
                interrupt-controller;
index da778693be548fdc87c6cb479ff6eefe71a7bf82..3f0c736d31d6bca211d1c76550ed28ca841a3c56 100644 (file)
        timer@1e000600 {
                compatible = "arm,cortex-a9-twd-timer";
                reg = <0x1e000600 0x20>;
-               interrupts = <1 2 0xf04>,
-                            <1 3 0xf04>;
+               interrupts = <1 13 0xf04>;
+       };
+
+       watchdog@1e000620 {
+               compatible = "arm,cortex-a9-twd-wdt";
+               reg = <0x1e000620 0x20>;
+               interrupts = <1 14 0xf04>;
        };
 
        gic: interrupt-controller@1e001000 {
index 9d7eb530f95fd926785170162812d3ef1096178d..aa07f5938f05cfac42414a79f475135ff827c260 100644 (file)
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        struct safe_buffer *buf;
        unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        struct safe_buffer *buf;
        unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
index 9854ff4279e0891bfaaa59d8086df7ee247c5003..11828e632532accb5ab386ac9a0ffe173c6b9fdf 100644 (file)
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
index 7e84f453e8a6f07e76c182badb2ee055eda1bde6..2d4f661d1cf6e757739429a4ba7730ea7be14eeb 100644 (file)
@@ -75,6 +75,7 @@ CONFIG_AB5500_CORE=y
 CONFIG_AB8500_CORE=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_AB8500_USB=y
index 68374ba6a943e9d029246d927b64a52913ea525e..c79f61faa3a55e81ae773b6d9d83532821b24c33 100644 (file)
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
 
index 3d2220498abc2db2d98378c94a3309c71e91ccee..6ddbe446425e11524d927b5cd8b479bcd2419238 100644 (file)
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)                                  \
-       do {                                            \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c3, c0      @ set domain"   \
-         : : "r" (x));                                 \
-       isb();                                          \
-       } while (0)
+static inline void set_domain(unsigned val)
+{
+       asm volatile(
+       "mcr    p15, 0, %0, c3, c0      @ set domain"
+         : : "r" (val));
+       isb();
+}
 
 #define modify_domain(dom,type)                                        \
        do {                                                    \
@@ -78,8 +78,8 @@
        } while (0)
 
 #else
-#define set_domain(x)          do { } while (0)
-#define modify_domain(dom,type)        do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
 
 /*
index 7be54690aeec0f38be6bb2738a8a7df8a243021d..e42cf597f6e6b5b7bcad58cdf9e75289d66672d4 100644 (file)
@@ -19,6 +19,7 @@
        "       .long   1b, 4f, 2b, 4f\n"                       \
        "       .popsection\n"                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
+       "       .align  2\n"                                    \
        "4:     mov     %0, " err_reg "\n"                      \
        "       b       3b\n"                                   \
        "       .popsection"
index e0d1c0cfa54816fda646b9d1b2996d9e1176fff6..6b9b077d86b3788fabb129c0b18bfb722724a697 100644 (file)
@@ -4,7 +4,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index efdf99045d879e240b9bc41f1f0781efea6ac10f..d2de9cbbcd9bcaf6a9e5b76eefac1f8c8eb7b39d 100644 (file)
@@ -22,9 +22,6 @@
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short         __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short         __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index b79f8e97f7755f22d82ae20ee00442cd11f7af02..af7b0bda3355d9af850ae722b2b1f55277ef6e67 100644 (file)
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_RESTARTSYS 10
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS        (1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
index 437f0c426517cf39c1035c3e7a0b6cfecb9395cb..0d1851ca6eb993a628f623c257487200fe529604 100644 (file)
@@ -495,6 +495,7 @@ ENDPROC(__und_usr)
  * The out of line fixup for the ldrt above.
  */
        .pushsection .fixup, "ax"
+       .align  2
 4:     mov     pc, r9
        .popsection
        .pushsection __ex_table,"a"
index ba32b393b3f0c514c83799687348d52655bfe1da..38c1a3b103a0684b5b579bb74ca07f38bb91eb13 100644 (file)
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("mov pc, r",0,2f,"")
        TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
index 8f96ec778e8dd4537afab036cb8e28456b9f4e10..6123daf397a7bbb7ffe161075165ddf57f175d10 100644 (file)
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
        /* LDRSB (literal)      1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
        /* LDRH (literal)       1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
        /* LDRSH (literal)      1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
-       DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+       DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
                                                 REGS(PC, NOSPPCX, 0, 0, 0)),
 
        /* STRB (immediate)     1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
index 186c8cb982c543a2cc1631796b34376151b33702..a02eada3aa5d06036027ac1241e652d5032781bd 100644 (file)
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
-               return -EPERM;
+               return -EOPNOTSUPP;
        }
 
        /*
index 5700a7ae7f0bc1511ae048d7a3f025c401e5729c..14e38261cd31db9d852db2eb0b8046251a04613d 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
-#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
                audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
                                    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
-       if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-               scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return scno;
 
index 17fc36c41cff6c155802fab60e3b04a3b5efd135..536c5d6b340b7fca2aee9c770000372c38be98bb 100644 (file)
 
 #include "signal.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART                (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -48,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the later case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+       SWI_SYS_RESTART,        /* swi  __NR_restart_syscall */
+       0xe49df004,             /* ldr  pc, [sp], #4 */
+};
+
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -210,10 +221,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        int err;
 
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-       if (err == 0) {
-               sigdelsetmask(&set, ~_BLOCKABLE);
+       if (err == 0)
                set_current_blocked(&set);
-       }
 
        __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
@@ -528,13 +537,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 /*
  * OK, we're invoking a handler
  */    
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset,
-             struct pt_regs * regs)
+             siginfo_t *info, struct pt_regs *regs)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
+       sigset_t *oldset = sigmask_to_save();
        int usig = sig;
        int ret;
 
@@ -559,17 +568,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 
        if (ret != 0) {
                force_sigsegv(sig, tsk);
-               return ret;
+               return;
        }
-
-       /*
-        * Block the signal if we were successful.
-        */
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs, 0);
-
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -604,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
-               case -ERESTART_RESTARTBLOCK:
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
                }
        }
 
@@ -617,38 +620,21 @@ static void do_signal(struct pt_regs *regs, int syscall)
         */
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               sigset_t *oldset;
-
                /*
                 * Depending on the signal settings we may need to revert the
                 * decision to restart the system call.  But skip this if a
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->ARM_pc == restart_addr) {
-                       if (retval == -ERESTARTNOHAND ||
-                           retval == -ERESTART_RESTARTBLOCK
+                       if (retval == -ERESTARTNOHAND
                            || (retval == -ERESTARTSYS
                                && !(ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
-                       clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
                }
 
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
@@ -659,15 +645,32 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * ignore the restart.
                 */
                if (retval == -ERESTART_RESTARTBLOCK
-                   && regs->ARM_pc == restart_addr)
-                       set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+                   && regs->ARM_pc == continue_addr) {
+                       if (thumb_mode(regs)) {
+                               regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+                               regs->ARM_pc -= 2;
+                       } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+                               regs->ARM_r7 = __NR_restart_syscall;
+                               regs->ARM_pc -= 4;
+#else
+                               u32 __user *usp;
+
+                               regs->ARM_sp -= 4;
+                               usp = (u32 __user *)regs->ARM_sp;
+
+                               if (put_user(regs->ARM_pc, usp) == 0) {
+                                       regs->ARM_pc = KERN_RESTART_CODE;
+                               } else {
+                                       regs->ARM_sp += 4;
+                                       force_sigsegv(0, current);
+                               }
+#endif
+                       }
+               }
        }
 
-       /* If there's no signal to deliver, we just put the saved sigmask
-        * back.
-        */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 }
 
 asmlinkage void
@@ -679,7 +682,5 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
        if (thread_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
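
Taken together, the signal.c hunks above replace the TIF_SYSCALL_RESTARTSYS bookkeeping with an explicit restart arranged in do_signal(): a syscall that returned -ERESTART_RESTARTBLOCK and for which no handler is invoked is redirected to sys_restart_syscall, either by rewinding the PC and reloading r7 (Thumb, or EABI without OABI compat) or, when the caller's ABI cannot be known, by pushing the continue address onto the user stack and pointing the PC at the two-instruction stub (swi __NR_restart_syscall; ldr pc, [sp], #4) copied into the vector page. A minimal stand-alone C model of that three-way choice follows; every MODEL_-prefixed identifier is an illustrative stand-in, not a kernel symbol, and the real code operates on struct pt_regs and writes to the user stack with put_user().

#include <stdio.h>

#define MODEL_NR_RESTART_SYSCALL   0x0000f0UL    /* stand-in syscall number */
#define MODEL_KERN_RESTART_CODE    0xffff051cUL  /* stand-in vector-page stub address */

struct model_regs { unsigned long pc, sp, r7; };

enum model_abi { MODEL_THUMB, MODEL_EABI_ONLY, MODEL_OABI_COMPAT };

static void model_arrange_restart(struct model_regs *regs, enum model_abi abi,
                                  unsigned long *user_sp)
{
        switch (abi) {
        case MODEL_THUMB:
                regs->r7 = MODEL_NR_RESTART_SYSCALL;  /* kernel: __NR_restart_syscall - __NR_SYSCALL_BASE */
                regs->pc -= 2;                        /* re-execute the 16-bit swi */
                break;
        case MODEL_EABI_ONLY:
                regs->r7 = MODEL_NR_RESTART_SYSCALL;  /* kernel: __NR_restart_syscall */
                regs->pc -= 4;                        /* re-execute the 32-bit swi */
                break;
        case MODEL_OABI_COMPAT:
                *--user_sp = regs->pc;                /* save the continue address on the user stack */
                regs->sp -= sizeof(*user_sp);
                regs->pc = MODEL_KERN_RESTART_CODE;   /* stub: swi __NR_restart_syscall; ldr pc, [sp], #4 */
                break;
        }
}

int main(void)
{
        unsigned long stack[4];
        struct model_regs regs = { .pc = 0x8000, .sp = (unsigned long)&stack[4] };

        model_arrange_restart(&regs, MODEL_OABI_COMPAT, &stack[4]);
        printf("pc=%#lx saved continue address=%#lx\n", regs.pc, stack[3]);
        return 0;
}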
index 5ff067b7c7522f428b4343832ecb759b7ccf16de..6fcfe8398aa473051fbf72396986a84dc700978f 100644 (file)
@@ -8,5 +8,7 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE    (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE      (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
index b735521a4a5441f7764591bc06352d86b1ddae80..2c7217d971db0b42b9e1f5859459e18f9294f662 100644 (file)
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void);
 int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
-       struct task_struct *p;
        int ret;
 
        ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@ int __cpu_disable(void)
        flush_cache_all();
        local_flush_tlb_all();
 
-       read_lock(&tasklist_lock);
-       for_each_process(p) {
-               if (p->mm)
-                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-       }
-       read_unlock(&tasklist_lock);
+       clear_tasks_mm_cpumask(cpu);
 
        return 0;
 }
index 4928d89758f4ce0dea767acdf9fcf2299dff7833..3647170e9a16ba3aa8838218ed99e74dbc5b7c59 100644 (file)
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
         */
        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
               sigreturn_codes, sizeof(sigreturn_codes));
+       memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+              syscall_restart_code, sizeof(syscall_restart_code));
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
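
The early_trap_init() change above places the new restart stub in the vector page immediately after the existing sigreturn trampolines, which is what the KERN_RESTART_CODE definition in the header relies on: KERN_SIGRETURN_CODE plus sizeof(sigreturn_codes). A small stand-alone check of that arithmetic; the vector base and the 32-bit word size are assumptions about a typical high-vectors ARM build, not values taken from this tree.

#include <stdio.h>

int main(void)
{
        const unsigned long vectors_base   = 0xffff0000UL;  /* assumed CONFIG_VECTORS_BASE */
        const unsigned long word           = 4;             /* 32-bit ARM: sizeof(unsigned long) */
        const unsigned long sigreturn_code = vectors_base + 0x500;      /* KERN_SIGRETURN_CODE */
        const unsigned long restart_code   = sigreturn_code + 7 * word; /* + sizeof(sigreturn_codes) */

        printf("KERN_SIGRETURN_CODE = %#lx\n", sigreturn_code);
        printf("KERN_RESTART_CODE   = %#lx\n", restart_code);  /* 0xffff051c with these assumptions */
        return 0;
}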
index 43a31fb06318dc0a1213827a5069b73cac19f5a6..36ff15bbfdd4961afecfef9ea292c6445e9b3836 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index 226949dc4ac04a242c40e215d23818421342b99c..f953bb54aa9d31d590791d97aaaf770aaaecfd93 100644 (file)
@@ -50,5 +50,6 @@
 #define POWER_MANAGEMENT       (BRIDGE_VIRT_BASE | 0x011c)
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE         (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index ad1165d488c13f393bf352a00f0e03b62265f3cc..d52b0ef313b7e53c2efa6de67d0ddeddc4790ca5 100644 (file)
@@ -78,6 +78,7 @@
 
 /* North-South Bridge */
 #define BRIDGE_VIRT_BASE       (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (DOVE_SB_REGS_PHYS_BASE | 0x20000)
 
 /* Cryptographic Engine */
 #define DOVE_CRYPT_PHYS_BASE   (DOVE_SB_REGS_PHYS_BASE | 0x30000)
index eb282378fa786bc076f0bcbc5ac5b7ba513a9f22..01abd3516a772ef93afe3d68c0556f587b1bb180 100644 (file)
@@ -82,8 +82,6 @@ static int snappercl15_nand_dev_ready(struct mtd_info *mtd)
        return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY);
 }
 
-static const char *snappercl15_nand_part_probes[] = {"cmdlinepart", NULL};
-
 static struct mtd_partition snappercl15_nand_parts[] = {
        {
                .name           = "Kernel",
@@ -100,10 +98,8 @@ static struct mtd_partition snappercl15_nand_parts[] = {
 static struct platform_nand_data snappercl15_nand_data = {
        .chip = {
                .nr_chips               = 1,
-               .part_probe_types       = snappercl15_nand_part_probes,
                .partitions             = snappercl15_nand_parts,
                .nr_partitions          = ARRAY_SIZE(snappercl15_nand_parts),
-               .options                = NAND_NO_AUTOINCR,
                .chip_delay             = 25,
        },
        .ctrl = {
index d4ef339d961ed2327f94b08f1c3afc4838119463..75cab2d7ec73a6a4a4619d606a2f9c506501a957 100644 (file)
@@ -105,8 +105,6 @@ static int ts72xx_nand_device_ready(struct mtd_info *mtd)
        return !!(__raw_readb(addr) & 0x20);
 }
 
-static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
-
 #define TS72XX_BOOTROM_PART_SIZE       (SZ_16K)
 #define TS72XX_REDBOOT_PART_SIZE       (SZ_2M + SZ_1M)
 
@@ -134,7 +132,6 @@ static struct platform_nand_data ts72xx_nand_data = {
                .nr_chips       = 1,
                .chip_offset    = 0,
                .chip_delay     = 15,
-               .part_probe_types = ts72xx_nand_part_probes,
                .partitions     = ts72xx_nand_parts,
                .nr_partitions  = ARRAY_SIZE(ts72xx_nand_parts),
        },
index 43ebe909441108097a30f0c9c7fc605407105501..6f6d13f91e4ca1fc65448ee4b4f950ea0eeeeebc 100644 (file)
@@ -62,6 +62,8 @@ config SOC_EXYNOS5250
        default y
        depends on ARCH_EXYNOS5
        select SAMSUNG_DMADEV
+       select S5P_PM if PM
+       select S5P_SLEEP if PM
        help
          Enable EXYNOS5250 SoC support
 
@@ -210,7 +212,7 @@ config MACH_SMDKV310
        select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_AHCI
        select SAMSUNG_DEV_KEYPAD
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select SAMSUNG_DEV_PWM
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
@@ -262,7 +264,7 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_ONENAND
        select S5P_DEV_TV
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_I2C1
@@ -301,7 +303,7 @@ config MACH_NURI
        select S5P_DEV_MFC
        select S5P_DEV_USB_EHCI
        select S5P_SETUP_MIPIPHY
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMC
        select EXYNOS4_SETUP_FIMD0
@@ -339,7 +341,7 @@ config MACH_ORIGEN
        select SAMSUNG_DEV_PWM
        select EXYNOS_DEV_DRM
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_SDHCI
index 440a637c76f1933347050b82fb21e0275c1b327d..9b58024f7d43919fcc1c2c07ed6732d39bcd128d 100644 (file)
@@ -22,7 +22,7 @@ obj-$(CONFIG_PM)              += pm.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 obj-$(CONFIG_CPU_IDLE)         += cpuidle.o
 
-obj-$(CONFIG_ARCH_EXYNOS4)     += pmu.o
+obj-$(CONFIG_ARCH_EXYNOS)      += pmu.o
 
 obj-$(CONFIG_SMP)              += platsmp.o headsmp.o
 
index 5aa460b01fdf41b7e476ed371b07de5fd747a88c..fefa336be2b4baf8d841a809766be3bdf9338f83 100644 (file)
 
 #ifdef CONFIG_PM_SLEEP
 static struct sleep_save exynos5_clock_save[] = {
-       /* will be implemented */
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
+       SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
+       SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
+       SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
+       SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
+       SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
+       SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
+       SAVE_ITEM(EXYNOS5_EPLL_CON0),
+       SAVE_ITEM(EXYNOS5_EPLL_CON1),
+       SAVE_ITEM(EXYNOS5_EPLL_CON2),
+       SAVE_ITEM(EXYNOS5_VPLL_CON0),
+       SAVE_ITEM(EXYNOS5_VPLL_CON1),
+       SAVE_ITEM(EXYNOS5_VPLL_CON2),
 };
 #endif
 
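The exynos5_clock_save[] list above replaces the "will be implemented" placeholder, so EXYNOS5 now takes part in the same sleep_save scheme as EXYNOS4: each SAVE_ITEM() names a clock register whose contents are captured before suspend and written back on resume by the plat-samsung PM helpers. A minimal stand-alone model of that save/restore pattern, with plain variables standing in for __iomem registers and illustrative helper names rather than the real plat-samsung API:

#include <stdio.h>

/* Plain variables stand in for memory-mapped clock registers. */
struct model_sleep_save {
        unsigned int *reg;   /* stand-in for void __iomem *reg */
        unsigned int val;
};

static void model_do_save(struct model_sleep_save *p, int count)
{
        for (; count > 0; count--, p++)
                p->val = *p->reg;        /* roughly: ptr->val = __raw_readl(ptr->reg) */
}

static void model_do_restore(struct model_sleep_save *p, int count)
{
        for (; count > 0; count--, p++)
                *p->reg = p->val;        /* roughly: __raw_writel(ptr->val, ptr->reg) */
}

int main(void)
{
        unsigned int clksrc_top0 = 0x11, clkdiv_top0 = 0x22;     /* fake registers */
        struct model_sleep_save save[] = { { &clksrc_top0, 0 }, { &clkdiv_top0, 0 } };

        model_do_save(save, 2);
        clksrc_top0 = 0;                 /* contents "lost" across suspend */
        clkdiv_top0 = 0;
        model_do_restore(save, 2);
        printf("%#x %#x\n", clksrc_top0, clkdiv_top0);           /* prints 0x11 0x22 */
        return 0;
}
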
index 26dac2893b8e4f2a97bdaa0be50506b893083a50..cff0595d0d352c0d69cb9023b9b1cbf555fd16c4 100644 (file)
@@ -100,7 +100,7 @@ static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
        exynos4_set_wakeupmask();
 
        /* Set value of power down register for aftr mode */
-       exynos4_sys_powerdown_conf(SYS_AFTR);
+       exynos_sys_powerdown_conf(SYS_AFTR);
 
        __raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
        __raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
index 9d8da51e35caa794ff8d45405c789fa00d2e009d..a67ecfaf12160646bc05680b299ca044f039bb6e 100644 (file)
@@ -33,7 +33,7 @@ static inline void s3c_pm_arch_prepare_irqs(void)
        __raw_writel(tmp, S5P_WAKEUP_MASK);
 
        __raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
-       __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+       __raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
 }
 
 static inline void s3c_pm_arch_stop_clocks(void)
index e76b7faba66b08da7f5da4c73816a15b924d7b3e..7c27c2d4bf44d2914c3c38a76c8b33b1f1ec611b 100644 (file)
@@ -23,12 +23,12 @@ enum sys_powerdown {
 };
 
 extern unsigned long l2x0_regs_phys;
-struct exynos4_pmu_conf {
+struct exynos_pmu_conf {
        void __iomem *reg;
        unsigned int val[NUM_SYS_POWERDOWN];
 };
 
-extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode);
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
 extern void s3c_cpu_resume(void);
 
 #endif /* __ASM_ARCH_PMU_H */
index b78b5f3ad9c0d0168d8115e87d7fb2229b31c27c..8c9b38c9c5042d931ede0f3ab6f569cc3406dd86 100644 (file)
 
 #define EXYNOS5_CLKDIV_ACP                     EXYNOS_CLKREG(0x08500)
 
-#define EXYNOS5_CLKSRC_TOP2                    EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_EPLL_CON0                      EXYNOS_CLKREG(0x10130)
 #define EXYNOS5_EPLL_CON1                      EXYNOS_CLKREG(0x10134)
+#define EXYNOS5_EPLL_CON2                      EXYNOS_CLKREG(0x10138)
 #define EXYNOS5_VPLL_CON0                      EXYNOS_CLKREG(0x10140)
 #define EXYNOS5_VPLL_CON1                      EXYNOS_CLKREG(0x10144)
+#define EXYNOS5_VPLL_CON2                      EXYNOS_CLKREG(0x10148)
 #define EXYNOS5_CPLL_CON0                      EXYNOS_CLKREG(0x10120)
 
 #define EXYNOS5_CLKSRC_TOP0                    EXYNOS_CLKREG(0x10210)
+#define EXYNOS5_CLKSRC_TOP1                    EXYNOS_CLKREG(0x10214)
+#define EXYNOS5_CLKSRC_TOP2                    EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_CLKSRC_TOP3                    EXYNOS_CLKREG(0x1021C)
 #define EXYNOS5_CLKSRC_GSCL                    EXYNOS_CLKREG(0x10220)
 #define EXYNOS5_CLKSRC_DISP1_0                 EXYNOS_CLKREG(0x1022C)
+#define EXYNOS5_CLKSRC_MAUDIO                  EXYNOS_CLKREG(0x10240)
 #define EXYNOS5_CLKSRC_FSYS                    EXYNOS_CLKREG(0x10244)
 #define EXYNOS5_CLKSRC_PERIC0                  EXYNOS_CLKREG(0x10250)
+#define EXYNOS5_CLKSRC_PERIC1                  EXYNOS_CLKREG(0x10254)
+#define EXYNOS5_SCLK_SRC_ISP                   EXYNOS_CLKREG(0x10270)
 
 #define EXYNOS5_CLKSRC_MASK_TOP                        EXYNOS_CLKREG(0x10310)
 #define EXYNOS5_CLKSRC_MASK_GSCL               EXYNOS_CLKREG(0x10320)
 #define EXYNOS5_CLKSRC_MASK_DISP1_0            EXYNOS_CLKREG(0x1032C)
+#define EXYNOS5_CLKSRC_MASK_MAUDIO             EXYNOS_CLKREG(0x10334)
 #define EXYNOS5_CLKSRC_MASK_FSYS               EXYNOS_CLKREG(0x10340)
 #define EXYNOS5_CLKSRC_MASK_PERIC0             EXYNOS_CLKREG(0x10350)
+#define EXYNOS5_CLKSRC_MASK_PERIC1             EXYNOS_CLKREG(0x10354)
 
 #define EXYNOS5_CLKDIV_TOP0                    EXYNOS_CLKREG(0x10510)
 #define EXYNOS5_CLKDIV_TOP1                    EXYNOS_CLKREG(0x10514)
 #define EXYNOS5_CLKDIV_GSCL                    EXYNOS_CLKREG(0x10520)
 #define EXYNOS5_CLKDIV_DISP1_0                 EXYNOS_CLKREG(0x1052C)
 #define EXYNOS5_CLKDIV_GEN                     EXYNOS_CLKREG(0x1053C)
+#define EXYNOS5_CLKDIV_MAUDIO                  EXYNOS_CLKREG(0x10544)
 #define EXYNOS5_CLKDIV_FSYS0                   EXYNOS_CLKREG(0x10548)
 #define EXYNOS5_CLKDIV_FSYS1                   EXYNOS_CLKREG(0x1054C)
 #define EXYNOS5_CLKDIV_FSYS2                   EXYNOS_CLKREG(0x10550)
 #define EXYNOS5_CLKDIV_FSYS3                   EXYNOS_CLKREG(0x10554)
 #define EXYNOS5_CLKDIV_PERIC0                  EXYNOS_CLKREG(0x10558)
+#define EXYNOS5_CLKDIV_PERIC1                  EXYNOS_CLKREG(0x1055C)
+#define EXYNOS5_CLKDIV_PERIC2                  EXYNOS_CLKREG(0x10560)
+#define EXYNOS5_CLKDIV_PERIC3                  EXYNOS_CLKREG(0x10564)
+#define EXYNOS5_CLKDIV_PERIC4                  EXYNOS_CLKREG(0x10568)
+#define EXYNOS5_CLKDIV_PERIC5                  EXYNOS_CLKREG(0x1056C)
+#define EXYNOS5_SCLK_DIV_ISP                   EXYNOS_CLKREG(0x10580)
 
 #define EXYNOS5_CLKGATE_IP_ACP                 EXYNOS_CLKREG(0x08800)
 #define EXYNOS5_CLKGATE_IP_ISP0                        EXYNOS_CLKREG(0x0C800)
 #define EXYNOS5_CLKGATE_IP_GSCL                        EXYNOS_CLKREG(0x10920)
 #define EXYNOS5_CLKGATE_IP_DISP1               EXYNOS_CLKREG(0x10928)
 #define EXYNOS5_CLKGATE_IP_MFC                 EXYNOS_CLKREG(0x1092C)
+#define EXYNOS5_CLKGATE_IP_G3D                 EXYNOS_CLKREG(0x10930)
 #define EXYNOS5_CLKGATE_IP_GEN                 EXYNOS_CLKREG(0x10934)
 #define EXYNOS5_CLKGATE_IP_FSYS                        EXYNOS_CLKREG(0x10944)
 #define EXYNOS5_CLKGATE_IP_GPS                 EXYNOS_CLKREG(0x1094C)
index 4dbb8629b20025b091dd83016afa7f930accb776..43a99e6f56ab68e638621f8fb9e6b23c910005b3 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4 - Power management unit definition
+ * EXYNOS - Power management unit definition
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #define S5P_DIS_IRQ_CORE3                      S5P_PMUREG(0x1034)
 #define S5P_DIS_IRQ_CENTRAL3                   S5P_PMUREG(0x1038)
 
+/* For EXYNOS5 */
+
+#define EXYNOS5_USB_CFG                                                S5P_PMUREG(0x0230)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG                          S5P_PMUREG(0x1000)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG            S5P_PMUREG(0x1004)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG          S5P_PMUREG(0x1008)
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG                          S5P_PMUREG(0x1010)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG            S5P_PMUREG(0x1014)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG          S5P_PMUREG(0x1018)
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG                           S5P_PMUREG(0x1040)
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG           S5P_PMUREG(0x1048)
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG                            S5P_PMUREG(0x1050)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG              S5P_PMUREG(0x1054)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG            S5P_PMUREG(0x1058)
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG                         S5P_PMUREG(0x1080)
+#define EXYNOS5_ARM_L2_SYS_PWR_REG                             S5P_PMUREG(0x10C0)
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG                       S5P_PMUREG(0x1100)
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG                       S5P_PMUREG(0x1104)
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG                          S5P_PMUREG(0x110C)
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG                        S5P_PMUREG(0x1120)
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG                        S5P_PMUREG(0x1124)
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG                   S5P_PMUREG(0x112C)
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG                     S5P_PMUREG(0x1130)
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG                      S5P_PMUREG(0x1134)
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG                     S5P_PMUREG(0x1138)
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1140)
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1144)
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1148)
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x114C)
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1150)
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1154)
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG                    S5P_PMUREG(0x1164)
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG                    S5P_PMUREG(0x1170)
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG                            S5P_PMUREG(0x1180)
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG                      S5P_PMUREG(0x1184)
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG                            S5P_PMUREG(0x1188)
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG                     S5P_PMUREG(0x1190)
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG               S5P_PMUREG(0x1194)
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG                     S5P_PMUREG(0x1198)
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG                                S5P_PMUREG(0x11A0)
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG                                S5P_PMUREG(0x11A4)
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG                 S5P_PMUREG(0x11B0)
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG                 S5P_PMUREG(0x11B4)
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11C0)
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG                            S5P_PMUREG(0x11C8)
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11CC)
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D0)
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D4)
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D8)
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG                                S5P_PMUREG(0x11DC)
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11E0)
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11E4)
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG                           S5P_PMUREG(0x11E8)
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG                            S5P_PMUREG(0x11EC)
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11F4)
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG                           S5P_PMUREG(0x11FC)
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG                 S5P_PMUREG(0x1200)
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG                  S5P_PMUREG(0x1204)
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG               S5P_PMUREG(0x1208)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG                 S5P_PMUREG(0x1220)
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG                 S5P_PMUREG(0x1224)
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG                 S5P_PMUREG(0x1228)
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG                 S5P_PMUREG(0x122C)
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG                 S5P_PMUREG(0x1230)
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG                 S5P_PMUREG(0x1234)
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG                  S5P_PMUREG(0x1238)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG          S5P_PMUREG(0x123C)
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG                      S5P_PMUREG(0x1240)
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG               S5P_PMUREG(0x1250)
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG                                S5P_PMUREG(0x1260)
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG                            S5P_PMUREG(0x1280)
+#define EXYNOS5_XXTI_SYS_PWR_REG                               S5P_PMUREG(0x1284)
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG                      S5P_PMUREG(0x12C0)
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG                          S5P_PMUREG(0x1300)
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG                   S5P_PMUREG(0x1320)
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG                      S5P_PMUREG(0x1340)
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG                      S5P_PMUREG(0x1344)
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG                  S5P_PMUREG(0x1348)
+#define EXYNOS5_GSCL_SYS_PWR_REG                               S5P_PMUREG(0x1400)
+#define EXYNOS5_ISP_SYS_PWR_REG                                        S5P_PMUREG(0x1404)
+#define EXYNOS5_MFC_SYS_PWR_REG                                        S5P_PMUREG(0x1408)
+#define EXYNOS5_G3D_SYS_PWR_REG                                        S5P_PMUREG(0x140C)
+#define EXYNOS5_DISP1_SYS_PWR_REG                              S5P_PMUREG(0x1414)
+#define EXYNOS5_MAU_SYS_PWR_REG                                        S5P_PMUREG(0x1418)
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG                   S5P_PMUREG(0x1480)
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG                    S5P_PMUREG(0x1484)
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG                    S5P_PMUREG(0x1488)
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG                    S5P_PMUREG(0x148C)
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG                  S5P_PMUREG(0x1494)
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG                    S5P_PMUREG(0x1498)
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG                    S5P_PMUREG(0x14C0)
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG                     S5P_PMUREG(0x14C4)
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG                     S5P_PMUREG(0x14C8)
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG                     S5P_PMUREG(0x14CC)
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG                   S5P_PMUREG(0x14D4)
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG                     S5P_PMUREG(0x14D8)
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG                     S5P_PMUREG(0x1580)
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG                      S5P_PMUREG(0x1584)
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG                      S5P_PMUREG(0x1588)
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG                      S5P_PMUREG(0x158C)
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG                    S5P_PMUREG(0x1594)
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG                      S5P_PMUREG(0x1598)
+
+#define EXYNOS5_ARM_CORE0_OPTION                               S5P_PMUREG(0x2008)
+#define EXYNOS5_ARM_CORE1_OPTION                               S5P_PMUREG(0x2088)
+#define EXYNOS5_FSYS_ARM_OPTION                                        S5P_PMUREG(0x2208)
+#define EXYNOS5_ISP_ARM_OPTION                                 S5P_PMUREG(0x2288)
+#define EXYNOS5_ARM_COMMON_OPTION                              S5P_PMUREG(0x2408)
+#define EXYNOS5_TOP_PWR_OPTION                                 S5P_PMUREG(0x2C48)
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION                          S5P_PMUREG(0x2CC8)
+#define EXYNOS5_JPEG_MEM_OPTION                                        S5P_PMUREG(0x2F48)
+#define EXYNOS5_GSCL_STATUS                                    S5P_PMUREG(0x4004)
+#define EXYNOS5_ISP_STATUS                                     S5P_PMUREG(0x4024)
+#define EXYNOS5_GSCL_OPTION                                    S5P_PMUREG(0x4008)
+#define EXYNOS5_ISP_OPTION                                     S5P_PMUREG(0x4028)
+#define EXYNOS5_MFC_OPTION                                     S5P_PMUREG(0x4048)
+#define EXYNOS5_G3D_CONFIGURATION                              S5P_PMUREG(0x4060)
+#define EXYNOS5_G3D_STATUS                                     S5P_PMUREG(0x4064)
+#define EXYNOS5_G3D_OPTION                                     S5P_PMUREG(0x4068)
+#define EXYNOS5_DISP1_OPTION                                   S5P_PMUREG(0x40A8)
+#define EXYNOS5_MAU_OPTION                                     S5P_PMUREG(0x40C8)
+
+#define EXYNOS5_USE_SC_FEEDBACK                                        (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER                                 (1 << 0)
+
+#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL                    (1 << 2)
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN                 (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE                          (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI                          (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION                           (1 << 4)
+
 #endif /* __ASM_ARCH_REGS_PMU_H */
index 972983e392bc6d8dd2b34bd07798dcbfc5dfb4b4..656f8fc9addd3b67ecaafbe9207f279da01b3b70 100644 (file)
@@ -237,25 +237,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win nuri_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 64,
-               .right_margin   = 16,
-               .upper_margin   = 64,
-               .lower_margin   = 1,
-               .hsync_len      = 48,
-               .vsync_len      = 3,
-               .xres           = 1024,
-               .yres           = 600,
-               .refresh        = 60,
-       },
        .max_bpp        = 24,
        .default_bpp    = 16,
+       .xres           = 1024,
+       .yres           = 600,
        .virtual_x      = 1024,
        .virtual_y      = 2 * 600,
 };
 
+static struct fb_videomode nuri_lcd_timing = {
+       .left_margin    = 64,
+       .right_margin   = 16,
+       .upper_margin   = 64,
+       .lower_margin   = 1,
+       .hsync_len      = 48,
+       .vsync_len      = 3,
+       .xres           = 1024,
+       .yres           = 600,
+       .refresh        = 60,
+};
+
 static struct s3c_fb_platdata nuri_fb_pdata __initdata = {
        .win[0]         = &nuri_fb_win0,
+       .vtiming        = &nuri_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
index a7f7fd567ddee48b11e2be362c854c9e7f347f18..f5572be9d7bf38480f931618d27507738aaf1ad6 100644 (file)
@@ -604,24 +604,28 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 };
 #else
 static struct s3c_fb_pd_win origen_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 64,
-               .right_margin   = 16,
-               .upper_margin   = 64,
-               .lower_margin   = 16,
-               .hsync_len      = 48,
-               .vsync_len      = 3,
-               .xres           = 1024,
-               .yres           = 600,
-       },
+       .xres                   = 1024,
+       .yres                   = 600,
        .max_bpp                = 32,
        .default_bpp            = 24,
        .virtual_x              = 1024,
        .virtual_y              = 2 * 600,
 };
 
+static struct fb_videomode origen_lcd_timing = {
+       .left_margin    = 64,
+       .right_margin   = 16,
+       .upper_margin   = 64,
+       .lower_margin   = 16,
+       .hsync_len      = 48,
+       .vsync_len      = 3,
+       .xres           = 1024,
+       .yres           = 600,
+};
+
 static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
        .win[0]         = &origen_fb_win0,
+       .vtiming        = &origen_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
                                VIDCON1_INV_VCLK,
index 70df1a0c21187034ddea5153c90c55bda9611778..262e9e446a96a62934f58b978c81dcdd98e1c1a3 100644 (file)
@@ -178,22 +178,26 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 };
 #else
 static struct s3c_fb_pd_win smdkv310_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 13,
-               .right_margin   = 8,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
-       .max_bpp                = 32,
-       .default_bpp            = 24,
+       .max_bpp        = 32,
+       .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkv310_lcd_timing = {
+       .left_margin    = 13,
+       .right_margin   = 8,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = {
        .win[0]         = &smdkv310_fb_win0,
+       .vtiming        = &smdkv310_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = exynos4_fimd0_gpio_setup_24bpp,
index 083b44de9c10090dc887149c2ead7bb31858246a..cd92fa86ba41248da1d8de5e8f122edca102d290 100644 (file)
@@ -843,25 +843,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win universal_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 16,
-               .right_margin   = 16,
-               .upper_margin   = 2,
-               .lower_margin   = 28,
-               .hsync_len      = 2,
-               .vsync_len      = 1,
-               .xres           = 480,
-               .yres           = 800,
-               .refresh        = 55,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 800,
        .virtual_x      = 480,
        .virtual_y      = 2 * 800,
 };
 
+static struct fb_videomode universal_lcd_timing = {
+       .left_margin    = 16,
+       .right_margin   = 16,
+       .upper_margin   = 2,
+       .lower_margin   = 28,
+       .hsync_len      = 2,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 800,
+       .refresh        = 55,
+};
+
 static struct s3c_fb_platdata universal_lcd_pdata __initdata = {
        .win[0]         = &universal_fb_win0,
+       .vtiming        = &universal_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
index 563dea9a6dbb224ab850e8bb3490da95e52eb791..c06c992943a139bc3017854b19c4eba1cb09d4c5 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4210 - Power Management support
+ * EXYNOS - Power Management support
  *
  * Based on arch/arm/mach-s3c2410/pm.c
  * Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@ static struct sleep_save exynos4_vpll_save[] = {
        SAVE_ITEM(EXYNOS4_VPLL_CON1),
 };
 
-static struct sleep_save exynos4_core_save[] = {
-       /* GIC side */
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
-
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
-
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
-
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
-
+static struct sleep_save exynos_core_save[] = {
        /* SROM side */
        SAVE_ITEM(S5P_SROM_BW),
        SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@ static struct sleep_save exynos4_core_save[] = {
 /* For Cortex-A9 Diagnostic and Power control register */
 static unsigned int save_arm_register[2];
 
-static int exynos4_cpu_suspend(unsigned long arg)
+static int exynos_cpu_suspend(unsigned long arg)
 {
+#ifdef CONFIG_CACHE_L2X0
        outer_flush_all();
+#endif
 
        /* issue the standby signal into the pm unit. */
        cpu_do_idle();
@@ -170,19 +88,25 @@ static int exynos4_cpu_suspend(unsigned long arg)
        panic("sleep resumed to originator?");
 }
 
-static void exynos4_pm_prepare(void)
+static void exynos_pm_prepare(void)
 {
-       u32 tmp;
+       unsigned int tmp;
 
-       s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
-       s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
-       s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       tmp = __raw_readl(S5P_INFORM1);
+       if (!soc_is_exynos5250()) {
+               s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
+               s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+       } else {
+               /* Disable USE_RETENTION of JPEG_MEM_OPTION */
+               tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
+               tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+               __raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
+       }
 
        /* Set value of power down register for sleep mode */
 
-       exynos4_sys_powerdown_conf(SYS_SLEEP);
+       exynos_sys_powerdown_conf(SYS_SLEEP);
        __raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
 
        /* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@ static void exynos4_pm_prepare(void)
 
        /* Before enter central sequence mode, clock src register have to set */
 
-       s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
+       if (!soc_is_exynos5250())
+               s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
 
        if (soc_is_exynos4210())
                s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
 
 }
 
-static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
+static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
 {
-       pm_cpu_prep = exynos4_pm_prepare;
-       pm_cpu_sleep = exynos4_cpu_suspend;
+       pm_cpu_prep = exynos_pm_prepare;
+       pm_cpu_sleep = exynos_cpu_suspend;
 
        return 0;
 }
@@ -273,13 +198,13 @@ static void exynos4_restore_pll(void)
        } while (epll_wait || vpll_wait);
 }
 
-static struct subsys_interface exynos4_pm_interface = {
-       .name           = "exynos4_pm",
+static struct subsys_interface exynos_pm_interface = {
+       .name           = "exynos_pm",
        .subsys         = &exynos_subsys,
-       .add_dev        = exynos4_pm_add,
+       .add_dev        = exynos_pm_add,
 };
 
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos_pm_drvinit(void)
 {
        struct clk *pll_base;
        unsigned int tmp;
@@ -292,18 +217,20 @@ static __init int exynos4_pm_drvinit(void)
        tmp |= ((0xFF << 8) | (0x1F << 1));
        __raw_writel(tmp, S5P_WAKEUP_MASK);
 
-       pll_base = clk_get(NULL, "xtal");
+       if (!soc_is_exynos5250()) {
+               pll_base = clk_get(NULL, "xtal");
 
-       if (!IS_ERR(pll_base)) {
-               pll_base_rate = clk_get_rate(pll_base);
-               clk_put(pll_base);
+               if (!IS_ERR(pll_base)) {
+                       pll_base_rate = clk_get_rate(pll_base);
+                       clk_put(pll_base);
+               }
        }
 
-       return subsys_interface_register(&exynos4_pm_interface);
+       return subsys_interface_register(&exynos_pm_interface);
 }
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos_pm_drvinit);
 
-static int exynos4_pm_suspend(void)
+static int exynos_pm_suspend(void)
 {
        unsigned long tmp;
 
@@ -313,27 +240,27 @@ static int exynos4_pm_suspend(void)
        tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
        __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
 
-       if (soc_is_exynos4212() || soc_is_exynos4412()) {
-               tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
-               tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
-                        S5P_USE_STANDBYWFE_ISP_ARM);
-               __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
-       }
+       /* Setting SEQ_OPTION register */
+
+       tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
+       __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       /* Save Power control register */
-       asm ("mrc p15, 0, %0, c15, c0, 0"
-            : "=r" (tmp) : : "cc");
-       save_arm_register[0] = tmp;
+       if (!soc_is_exynos5250()) {
+               /* Save Power control register */
+               asm ("mrc p15, 0, %0, c15, c0, 0"
+                    : "=r" (tmp) : : "cc");
+               save_arm_register[0] = tmp;
 
-       /* Save Diagnostic register */
-       asm ("mrc p15, 0, %0, c15, c0, 1"
-            : "=r" (tmp) : : "cc");
-       save_arm_register[1] = tmp;
+               /* Save Diagnostic register */
+               asm ("mrc p15, 0, %0, c15, c0, 1"
+                    : "=r" (tmp) : : "cc");
+               save_arm_register[1] = tmp;
+       }
 
        return 0;
 }
 
-static void exynos4_pm_resume(void)
+static void exynos_pm_resume(void)
 {
        unsigned long tmp;
 
@@ -350,17 +277,19 @@ static void exynos4_pm_resume(void)
                /* No need to perform below restore code */
                goto early_wakeup;
        }
-       /* Restore Power control register */
-       tmp = save_arm_register[0];
-       asm volatile ("mcr p15, 0, %0, c15, c0, 0"
-                     : : "r" (tmp)
-                     : "cc");
-
-       /* Restore Diagnostic register */
-       tmp = save_arm_register[1];
-       asm volatile ("mcr p15, 0, %0, c15, c0, 1"
-                     : : "r" (tmp)
-                     : "cc");
+       if (!soc_is_exynos5250()) {
+               /* Restore Power control register */
+               tmp = save_arm_register[0];
+               asm volatile ("mcr p15, 0, %0, c15, c0, 0"
+                             : : "r" (tmp)
+                             : "cc");
+
+               /* Restore Diagnostic register */
+               tmp = save_arm_register[1];
+               asm volatile ("mcr p15, 0, %0, c15, c0, 1"
+                             : : "r" (tmp)
+                             : "cc");
+       }
 
        /* For release retention */
 
@@ -372,26 +301,28 @@ static void exynos4_pm_resume(void)
        __raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
        __raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
 
-       s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
+       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       exynos4_restore_pll();
+       if (!soc_is_exynos5250()) {
+               exynos4_restore_pll();
 
 #ifdef CONFIG_SMP
-       scu_enable(S5P_VA_SCU);
+               scu_enable(S5P_VA_SCU);
 #endif
+       }
 
 early_wakeup:
        return;
 }
 
-static struct syscore_ops exynos4_pm_syscore_ops = {
-       .suspend        = exynos4_pm_suspend,
-       .resume         = exynos4_pm_resume,
+static struct syscore_ops exynos_pm_syscore_ops = {
+       .suspend        = exynos_pm_suspend,
+       .resume         = exynos_pm_resume,
 };
 
-static __init int exynos4_pm_syscore_init(void)
+static __init int exynos_pm_syscore_init(void)
 {
-       register_syscore_ops(&exynos4_pm_syscore_ops);
+       register_syscore_ops(&exynos_pm_syscore_ops);
        return 0;
 }
-arch_initcall(exynos4_pm_syscore_init);
+arch_initcall(exynos_pm_syscore_init);
index e9fafcf163de8982287876a7ca6d6cf94488f472..373c3c00d24cdbbe054a1aae60625d97188e6208 100644 (file)
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
                                                struct exynos_pm_domain *pd)
 {
        if (pdev->dev.bus) {
-               if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+               if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+                       pm_genpd_dev_need_restore(&pdev->dev, true);
+               else
                        pr_info("%s: error in adding %s device to %s power"
                                "domain\n", __func__, dev_name(&pdev->dev),
                                pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
        if (of_have_populated_dt())
                return exynos_pm_dt_parse_domains();
 
-       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
-               pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
-                               exynos4_pm_domains[idx]->is_off);
+       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+               struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+               int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+               pm_genpd_init(&pd->pd, NULL, !on);
+       }
 
 #ifdef CONFIG_S5P_DEV_FIMD0
        exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
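
The pm_domains.c hunk above stops trusting a static is_off flag and instead reads each domain's hardware status register (offset 0x4 from its configuration register) to seed the generic power-domain state; devices successfully added to a domain are also marked as needing a restore on first power-up. A short stand-alone sketch of the "initialise software state from the status register" step; the register layout and the power-enable value are stand-ins, not the S5P definitions:

#include <stdio.h>

#define MODEL_LOCAL_PWR_EN  0x7   /* stand-in for S5P_INT_LOCAL_PWR_EN */

struct model_domain {
        unsigned int regs[2];     /* [0] = configuration, [1] = status (base + 0x4) */
        int off;                  /* software view, as pm_genpd_init() would record it */
};

static void model_domain_init(struct model_domain *pd)
{
        int on = pd->regs[1] & MODEL_LOCAL_PWR_EN;   /* roughly: __raw_readl(pd->base + 0x4) & ... */

        pd->off = !on;
}

int main(void)
{
        struct model_domain lcd0 = { .regs = { MODEL_LOCAL_PWR_EN, MODEL_LOCAL_PWR_EN } };
        struct model_domain mfc  = { .regs = { 0, 0 } };

        model_domain_init(&lcd0);
        model_domain_init(&mfc);
        printf("lcd0 off=%d, mfc off=%d\n", lcd0.off, mfc.off);  /* prints 0 and 1 */
        return 0;
}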
index 77c6815eebeea9c6070ccab7474f5282368b037e..4aacb66f71618da5d865a04a868878ef380aebea 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pmu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
- * EXYNOS4210 - CPU PMU(Power Management Unit) support
+ * EXYNOS - CPU PMU(Power Management Unit) support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/bug.h>
 
 #include <mach/regs-clock.h>
 #include <mach/pmu.h>
 
-static struct exynos4_pmu_conf *exynos4_pmu_config;
+static struct exynos_pmu_conf *exynos_pmu_config;
 
-static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
+static struct exynos_pmu_conf exynos4210_pmu_config[] = {
        /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
        { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
        { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
        { PMU_TABLE_END,},
 };
 
-static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
+static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
        { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
        { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
        { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
@@ -202,7 +202,7 @@ static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
        { PMU_TABLE_END,},
 };
 
-static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
+static struct exynos_pmu_conf exynos4412_pmu_config[] = {
        { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
        { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
        { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
@@ -212,13 +212,174 @@ static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
        { PMU_TABLE_END,},
 };
 
-void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
+static struct exynos_pmu_conf exynos5250_pmu_config[] = {
+       /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
+       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+void __iomem *exynos5_list_both_cnt_feed[] = {
+       EXYNOS5_ARM_CORE0_OPTION,
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_ARM_COMMON_OPTION,
+       EXYNOS5_GSCL_OPTION,
+       EXYNOS5_ISP_OPTION,
+       EXYNOS5_MFC_OPTION,
+       EXYNOS5_G3D_OPTION,
+       EXYNOS5_DISP1_OPTION,
+       EXYNOS5_MAU_OPTION,
+       EXYNOS5_TOP_PWR_OPTION,
+       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+void __iomem *exynos5_list_disable_wfi_wfe[] = {
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_FSYS_ARM_OPTION,
+       EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5_init_pmu(void)
 {
        unsigned int i;
+       unsigned int tmp;
+
+       /*
+        * Enable both SC_FEEDBACK and SC_COUNTER
+        */
+       for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
+               tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
+               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+                       EXYNOS5_USE_SC_COUNTER);
+               __raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+       }
+
+       /*
+        * Enable the SKIP_DEACTIVATE_ACEACP_IN_PWDN and
+        * MANUAL_L2RSTDISABLE_CONTROL bitfields
+        */
+       tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+       tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
+               EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+       __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+       /*
+        * Disable WFI/WFE on XXX_OPTION
+        */
+       for (i = 0 ; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe) ; i++) {
+               tmp = __raw_readl(exynos5_list_disable_wfi_wfe[i]);
+               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+                        EXYNOS5_OPTION_USE_STANDBYWFI);
+               __raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+       }
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+
+       if (soc_is_exynos5250())
+               exynos5_init_pmu();
 
-       for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
-               __raw_writel(exynos4_pmu_config[i].val[mode],
-                               exynos4_pmu_config[i].reg);
+       for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
+               __raw_writel(exynos_pmu_config[i].val[mode],
+                               exynos_pmu_config[i].reg);
 
        if (soc_is_exynos4412()) {
                for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
@@ -227,20 +388,23 @@ void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
        }
 }
 
-static int __init exynos4_pmu_init(void)
+static int __init exynos_pmu_init(void)
 {
-       exynos4_pmu_config = exynos4210_pmu_config;
+       exynos_pmu_config = exynos4210_pmu_config;
 
        if (soc_is_exynos4210()) {
-               exynos4_pmu_config = exynos4210_pmu_config;
+               exynos_pmu_config = exynos4210_pmu_config;
                pr_info("EXYNOS4210 PMU Initialize\n");
        } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
-               exynos4_pmu_config = exynos4x12_pmu_config;
+               exynos_pmu_config = exynos4x12_pmu_config;
                pr_info("EXYNOS4x12 PMU Initialize\n");
+       } else if (soc_is_exynos5250()) {
+               exynos_pmu_config = exynos5250_pmu_config;
+               pr_info("EXYNOS5250 PMU Initialize\n");
        } else {
-               pr_info("EXYNOS4: PMU not supported\n");
+               pr_info("EXYNOS: PMU not supported\n");
        }
 
        return 0;
 }
-arch_initcall(exynos4_pmu_init);
+arch_initcall(exynos_pmu_init);
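The new exynos5250_pmu_config table above follows the same convention as the existing 4210/4x12 tables: each row pairs a SYS_PWR register with one value per supported low-power mode (three columns here), and exynos_sys_powerdown_conf() simply walks the selected table until it hits the PMU_TABLE_END sentinel. A minimal standalone sketch of that table-driven pattern, using invented mode names and plain variables in place of real MMIO registers:

#include <stdio.h>
#include <stddef.h>

enum pd_mode { PD_MODE_A, PD_MODE_B, PD_MODE_C, NR_PD_MODES };

struct pmu_conf {
        volatile unsigned int *reg;     /* register to program */
        unsigned int val[NR_PD_MODES];  /* one value per low-power mode */
};

#define PMU_TABLE_END NULL

static unsigned int fake_reg_a, fake_reg_b;     /* stand-ins for MMIO registers */

static const struct pmu_conf pmu_config[] = {
        { &fake_reg_a, { 0x1, 0x0, 0x0 } },
        { &fake_reg_b, { 0x3, 0x0, 0x3 } },
        { PMU_TABLE_END, },
};

static void sys_powerdown_conf(enum pd_mode mode)
{
        unsigned int i;

        /* write the per-mode column of every row until the sentinel */
        for (i = 0; pmu_config[i].reg != PMU_TABLE_END; i++)
                *pmu_config[i].reg = pmu_config[i].val[mode];
}

int main(void)
{
        sys_powerdown_conf(PD_MODE_C);
        printf("a=%#x b=%#x\n", fake_reg_a, fake_reg_b);
        return 0;
}
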
index f8437dd238c2865adb462f8c1d23d575bda93cf1..ded4652ada803221ea97a92bb50dc6d8d6fef64e 100644 (file)
@@ -1,4 +1,8 @@
-obj-y                                  := clock.o highbank.o system.o
+obj-y                                  := clock.o highbank.o system.o smc.o
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_smc.o                           :=-Wa,-march=armv7-a$(plus_sec)
+
 obj-$(CONFIG_DEBUG_HIGHBANK_UART)      += lluart.o
 obj-$(CONFIG_SMP)                      += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o
index d8e2d0be64ac365dc665b1e7e1a52c4549973f77..141ed5171826acbc3caa6a1b7e843f2737361f65 100644 (file)
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void);
 static inline void highbank_lluart_map_io(void) {}
 #endif
 
+extern void highbank_smc1(int fn, int arg);
index 410a112bb52e29036d22c1de07ffed1965011389..8777612b1a42b2dd6fd1a49df59ff6a93153b9d6 100644 (file)
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = {
        {}
 };
 
+#ifdef CONFIG_CACHE_L2X0
+static void highbank_l2x0_disable(void)
+{
+       /* Disable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x0);
+}
+#endif
+
 static void __init highbank_init_irq(void)
 {
        of_irq_init(irq_match);
+
+#ifdef CONFIG_CACHE_L2X0
+       /* Enable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x1);
        l2x0_of_init(0, ~0UL);
+       outer_cache.disable = highbank_l2x0_disable;
+#endif
 }
 
 static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644 (file)
index 0000000..407d17b
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * This is a common routine for the secure monitor API
+ * used to modify the PL310 secure registers.
+ * 'r0' contains the value to be modified and 'r12' contains
+ * the monitor API number.
+ * Function signature : void highbank_smc1(u32 fn, u32 arg)
+ */
+
+ENTRY(highbank_smc1)
+       stmfd   sp!, {r4-r11, lr}
+       mov     r12, r0
+       mov     r0, r1
+       dsb
+       smc     #0
+       ldmfd   sp!, {r4-r11, pc}
+ENDPROC(highbank_smc1)
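In the new smc.S the C arguments arrive in r0 (fn) and r1 (arg); the stub shifts the function number into r12 and the argument into r0 before issuing smc #0, as the header comment describes. A hedged, standalone sketch of how the call-site pattern from the init_irq hunk looks from C, with a printing stub standing in for the real monitor call (the wrapper names are illustrative, not kernel symbols):

#include <stdio.h>

/*
 * Stub standing in for the assembly routine above: the real one moves
 * 'fn' into r12 and 'arg' into r0, then issues smc #0.
 */
static void highbank_smc1_stub(unsigned int fn, unsigned int arg)
{
        printf("smc: fn=%#x arg=%#x\n", fn, arg);
}

/* Illustrative wrappers mirroring the two call sites in the init_irq hunk. */
static void l2x0_secure_enable(void)
{
        highbank_smc1_stub(0x102, 0x1);         /* ask the monitor to enable the PL310 */
}

static void l2x0_secure_disable(void)
{
        highbank_smc1_stub(0x102, 0x0);         /* ask the monitor to disable the PL310 */
}

int main(void)
{
        l2x0_secure_enable();
        l2x0_secure_disable();
        return 0;
}
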
index 0021f726b153210b04b876cabc7d36c61663c2a1..eff4db5de0ddb8abbfde826cc14c16b9432c5373 100644 (file)
@@ -477,6 +477,7 @@ config MACH_MX31_3DS
        select IMX_HAVE_PLATFORM_IMX2_WDT
        select IMX_HAVE_PLATFORM_IMX_I2C
        select IMX_HAVE_PLATFORM_IMX_KEYPAD
+       select IMX_HAVE_PLATFORM_IMX_SSI
        select IMX_HAVE_PLATFORM_IMX_UART
        select IMX_HAVE_PLATFORM_IPU_CORE
        select IMX_HAVE_PLATFORM_MXC_EHCI
index 0f0beb580b73f2d6ecff6b2aa70f5115d9776532..516ddee1948e81dc207d1b91c29380452c6c970d 100644 (file)
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
        clk_register_clkdev(clk[clko], "clko", NULL);
 
-       mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
-                       MX1_TIM1_INT);
+       mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
 
        return 0;
 }
index 4e4f384ee8ddf6562c39485c1fede1b7e7da6425..ea13e61bd5f36163d3b1372c16dbc0ae7c0d90a5 100644 (file)
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
        clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
        clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
 
-       mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
-                       MX21_INT_GPT1);
+       mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
+
        return 0;
 }
index d9833bb5fd610a3e9fdedff0cdc58a8724754ea9..fdd8cc87c9feee388ca8a94b0fb46fa20305f33a 100644 (file)
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
        clk_register_clkdev(clk[iim_ipg], "iim", NULL);
 
-       mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+       mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
        return 0;
 }
index 50a7ebd8d1b211f4790437fa6e74f9547cb9883c..295cbd7c08dcd9df383b8fd7eb5a1843083e895a 100644 (file)
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
        clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
 
-       mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
-                       MX27_INT_GPT1);
+       mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
 
        clk_prepare_enable(clk[emi_ahb_gate]);
 
index a854b9cae5ea505f3c2586224eb1f12651ffad93..c9a06d800f8ef7a0a7d96e015cd87754afd09a25 100644 (file)
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref)
        mx31_revision();
        clk_disable_unprepare(clk[iim_gate]);
 
-       mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
-                       MX31_INT_GPT);
+       mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
 
        return 0;
 }
index a9e60bf7dd75ca7bf8419ba2318d9eaffb7484a0..c6422fb10bae37756693f3323f79df62e4fc932e 100644 (file)
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
                        pr_err("i.MX35 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clk[i]));
 
-
        clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
        clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
        clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,14 +263,20 @@ int __init mx35_clocks_init()
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
 
+       /*
+        * SCC is needed to boot via mmc after a watchdog reset. The clock code
+        * before conversion to common clk also enabled UART1 (which isn't
+        * handled here and is not needed for mmc) and IIM (which is enabled
+        * unconditionally above).
+        */
+       clk_prepare_enable(clk[scc_gate]);
+
        imx_print_silicon_rev("i.MX35", mx35_revision());
 
 #ifdef CONFIG_MXC_USE_EPIT
-       epit_timer_init(&epit1_clk,
-                       MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+       epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
 #else
-       mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
-                       MX35_INT_GPT);
+       mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
 #endif
 
        return 0;
index fcd94f3b0f0e7cf4380a47e80d0425c71a00f02d..a2200c77bf70dcdc09c44d642fe09656f36f0fcb 100644 (file)
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
                                periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
        clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
                                main_bus_sel, ARRAY_SIZE(main_bus_sel));
-       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
                                per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
        clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
        clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
        clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
-       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
                                per_root_sel, ARRAY_SIZE(per_root_sel));
        clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
        clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
        clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
        clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
-       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
        clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
        clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
        clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 166250000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
-               MX51_INT_GPT);
+       mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX51", mx51_revision());
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 200000000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
-               MX53_INT_GPT);
+       mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX53", mx53_revision());
index cab02d0a15d60d1c1bfe6e16157b386c0fb6b062..e1a17ac7b3b48419a8ff1f9dadfa5de03f5fd852 100644 (file)
@@ -122,10 +122,6 @@ static const char *cko1_sels[]     = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5
                                    "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
                                    "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
 
-static const char * const clks_init_on[] __initconst = {
-       "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
-};
-
 enum mx6q_clks {
        dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
        pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
@@ -156,16 +152,20 @@ enum mx6q_clks {
        ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
        usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
        pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
-       ssi2_ipg, ssi3_ipg, clk_max
+       ssi2_ipg, ssi3_ipg, rom,
+       clk_max
 };
 
 static struct clk *clk[clk_max];
 
+static enum mx6q_clks const clks_init_on[] __initconst = {
+       mmdc_ch0_axi, rom,
+};
+
 int __init mx6q_clocks_init(void)
 {
        struct device_node *np;
        void __iomem *base;
-       struct clk *c;
        int i, irq;
 
        clk[dummy] = imx_clk_fixed("dummy", 0);
@@ -365,6 +365,7 @@ int __init mx6q_clocks_init(void)
        clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
        clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
        clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+       clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
        clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
        clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
        clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
@@ -424,21 +425,14 @@ int __init mx6q_clocks_init(void)
        clk_register_clkdev(clk[ahb], "ahb", NULL);
        clk_register_clkdev(clk[cko1], "cko1", NULL);
 
-       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
-               c = clk_get_sys(clks_init_on[i], NULL);
-               if (IS_ERR(c)) {
-                       pr_err("%s: failed to get clk %s", __func__,
-                              clks_init_on[i]);
-                       return PTR_ERR(c);
-               }
-               clk_prepare_enable(c);
-       }
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+               clk_prepare_enable(clk[clks_init_on[i]]);
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
        base = of_iomap(np, 0);
        WARN_ON(!base);
        irq = irq_of_parse_and_map(np, 0);
-       mxc_timer_init(NULL, base, irq);
+       mxc_timer_init(base, irq);
 
        return 0;
 }
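The clks_init_on change above swaps a by-name clk_get_sys() lookup, which can fail and needs an error path, for direct indexing into the clk[] array by enum, so the clocks that must stay on can be enabled unconditionally. A small standalone sketch of the same indexing idea, with made-up clock names:

#include <stdio.h>
#include <stddef.h>

enum toy_clks { clk_bus, clk_rom, clk_uart, clk_max };

/* In the kernel these would be struct clk pointers; flags are enough here. */
static int clk_enabled[clk_max];

/* Clocks that must stay on, listed by enum index rather than by string name. */
static const enum toy_clks clks_init_on[] = { clk_bus, clk_rom };

static void clk_prepare_enable_idx(enum toy_clks c)
{
        clk_enabled[c] = 1;             /* no lookup, no error path */
}

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(clks_init_on) / sizeof(clks_init_on[0]); i++)
                clk_prepare_enable_idx(clks_init_on[i]);

        printf("bus=%d rom=%d uart=%d\n",
               clk_enabled[clk_bus], clk_enabled[clk_rom], clk_enabled[clk_uart]);
        return 0;
}
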
index 4685919deb633f2427d0eb14da32097108e7fe16..0440379e36284c03f3494b0b87f23a95c0f980b0 100644 (file)
@@ -74,30 +74,15 @@ struct clk_pllv2 {
        void __iomem    *base;
 };
 
-static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
+               u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
 {
        long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
-       unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
-       void __iomem *pllbase;
+       unsigned long dbl;
        s64 temp;
-       struct clk_pllv2 *pll = to_clk_pllv2(hw);
-
-       pllbase = pll->base;
 
-       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
        dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
 
-       if (pll_hfsm == 0) {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
-       } else {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
-       }
        pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
        mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
        mfi = (mfi <= 5) ? 5 : mfi;
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
        return temp;
 }
 
-static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
+       u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
+       void __iomem *pllbase;
        struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+       pllbase = pll->base;
+
+       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+       dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+       dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+       dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+
+       return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
+}
+
+static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
+               u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
+{
        u32 reg;
-       void __iomem *pllbase;
        long mfi, pdf, mfn, mfd = 999999;
        s64 temp64;
        unsigned long quad_parent_rate;
-       unsigned long pll_hfsm, dp_ctl;
-
-       pllbase = pll->base;
 
        quad_parent_rate = 4 * parent_rate;
        pdf = mfi = -1;
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
                return -EINVAL;
        pdf--;
 
-       temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
-       do_div(temp64, quad_parent_rate/1000000);
+       temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
+       do_div(temp64, quad_parent_rate / 1000000);
        mfn = (long)temp64;
 
+       reg = mfi << 4 | pdf;
+
+       *dp_op = reg;
+       *dp_mfd = mfd;
+       *dp_mfn = mfn;
+
+       return 0;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       void __iomem *pllbase;
+       u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
+       int ret;
+
+       pllbase = pll->base;
+
+
+       ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
+       if (ret)
+               return ret;
+
        dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
        /* use dpdck0_2 */
        __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-       if (pll_hfsm == 0) {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
-       } else {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
-       }
+
+       __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
+       __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
+       __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
 
        return 0;
 }
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
 static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long *prate)
 {
-       return rate;
+       u32 dp_op, dp_mfd, dp_mfn;
+
+       __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+       return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
+                       dp_op, dp_mfd, dp_mfn);
 }
 
 static int clk_pllv2_prepare(struct clk_hw *hw)
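The clk_pllv2 rework separates the pure arithmetic (derive the DP_OP/DP_MFD/DP_MFN fields for a requested rate, and recompute a rate from such fields) from the register I/O, which is what lets clk_pllv2_round_rate() report the rate the dividers can actually produce instead of echoing the request. A simplified sketch of that round-trip structure, using a toy multiplier/divider model rather than the real MFI/MFN/MFD maths:

#include <stdio.h>

/* Toy 'PLL': rate = parent * mult / div, integer fields only. */
static void toy_pll_fields_for_rate(unsigned long rate, unsigned long parent,
                                    unsigned int *mult, unsigned int *div)
{
        *div = 4;                                               /* fixed post-divider */
        *mult = (unsigned int)((rate * *div + parent / 2) / parent);  /* nearest multiplier */
}

static unsigned long toy_pll_rate_from_fields(unsigned long parent,
                                              unsigned int mult, unsigned int div)
{
        return parent * mult / div;
}

/* round_rate: run the maths both ways, touch no hardware at all. */
static unsigned long toy_pll_round_rate(unsigned long rate, unsigned long parent)
{
        unsigned int mult, div;

        toy_pll_fields_for_rate(rate, parent, &mult, &div);
        return toy_pll_rate_from_fields(parent, mult, div);
}

int main(void)
{
        /* 24 MHz parent, ask for 100 MHz: the achievable rate quantises to 102 MHz */
        printf("rounded: %lu\n", toy_pll_round_rate(100000000UL, 24000000UL));
        return 0;
}
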
index 5e11ba7daee2e34ce64d3ad903cebfe6adb151b7..5e3f1f0f4cab88189d2cebced96e5b04a5f8767c 100644 (file)
@@ -23,7 +23,7 @@
 #define MX53_DPLL1_BASE                MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
 #define MX53_DPLL2_BASE                MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
 #define MX53_DPLL3_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
-#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
 
 /* PLL Register Offsets */
 #define MXC_PLL_DP_CTL                 0x00
index 89493abd497c61f67e37a8eea984e517431f224a..20ed2d56c1af6a3109ff3ea10843cda25d2a289e 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/errno.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <mach/common.h>
 
 int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
        return 1;
 }
 
+static inline void cpu_enter_lowpower(void)
+{
+       unsigned int v;
+
+       flush_cache_all();
+       asm volatile(
+               "mcr    p15, 0, %1, c7, c5, 0\n"
+       "       mcr     p15, 0, %1, c7, c10, 4\n"
+       /*
+        * Turn off coherency
+        */
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       bic     %0, %0, %3\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+         : "=&r" (v)
+         : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+       unsigned int v;
+
+       asm volatile(
+               "mrc    p15, 0, %0, c1, c0, 0\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       orr     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+         : "=&r" (v)
+         : "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
-       flush_cache_all();
+       cpu_enter_lowpower();
        imx_enable_cpu(cpu, false);
        cpu_do_idle();
+       cpu_leave_lowpower();
 
        /* We should never return from idle */
        panic("cpu %d unexpectedly exit from shutdown\n", cpu);
index ed38d03c61f22296acb782e7e4cc9c8dfde20344..eee0cc8d92a43e84300723b05e6e9bb86bcb84f1 100644 (file)
@@ -29,6 +29,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = {
        OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL),
        OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL),
        OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL),
+       OF_DEV_AUXDATA("fsl,imx27-nand", MX27_NFC_BASE_ADDR, "mxc_nand.0", NULL),
        { /* sentinel */ }
 };
 
index c515f8ede1a145a68a0345ae2d503e02949a9d14..6450303f1a7ac01e701adf8b886248a981c54856 100644 (file)
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x48),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
                .irq            = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
        },
index ac50f1671e381447d7fda9335d32cac33432b7ca..1e09de50cbcdc9292ad762d6228c7663fbfcc4a2 100644 (file)
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x49),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
        },
 };
index dff82eb57cd9f8957d1db67689ae5d58d7a4884c..ba09552fe5feee5b6720e3b294449e6d46bfeddf 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = {
        PB23_PF_USB_PWR,
        PB24_PF_USB_OC,
        /* CSI */
+       TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
+       TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
        PB10_PF_CSI_D0,
        PB11_PF_CSI_D1,
        PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = {
        { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
 };
 
+static const struct gpio visstrim_m10_gpios[] __initconst = {
+       {
+               .gpio = TVP5150_RSTN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
+               .label = "tvp5150_rstn",
+       },
+       {
+               .gpio = TVP5150_PWDN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "tvp5150_pwdn",
+       },
+       {
+               .gpio = OTG_PHY_CS_GPIO,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "usbotg_cs",
+       },
+};
+
 /* Camera */
 static int visstrim_camera_power(struct device *dev, int on)
 {
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void)
        struct platform_device *pdev;
        int dma;
 
-       /* Initialize tvp5150 gpios */
-       mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
-       mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
-       gpio_set_value(TVP5150_RSTN, 1);
-       gpio_set_value(TVP5150_PWDN, 0);
-       ndelay(1);
-
        gpio_set_value(TVP5150_PWDN, 1);
        ndelay(1);
        gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
 /* USB OTG */
 static int otg_phy_init(struct platform_device *pdev)
 {
-       gpio_set_value(OTG_PHY_CS_GPIO, 0);
-
-       mdelay(10);
-
        return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
 }
 
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void)
        if (ret)
                pr_err("Failed to setup pins (%d)\n", ret);
 
+       ret = gpio_request_array(visstrim_m10_gpios,
+                               ARRAY_SIZE(visstrim_m10_gpios));
+       if (ret)
+               pr_err("Failed to request gpios (%d)\n", ret);
+
        imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
        imx27_add_imx_uart0(&uart_pdata);
 
index d14bbe949a4f2575a13227b6a629fd177774492f..3e7401fca76c7e18860261a7541449e3c75b5c9d 100644 (file)
@@ -32,7 +32,7 @@
  * Memory-mapped I/O on MX21ADS base board
  */
 #define MX21ADS_MMIO_BASE_ADDR   0xf5000000
-#define MX21ADS_MMIO_SIZE        SZ_16M
+#define MX21ADS_MMIO_SIZE        0xc00000
 
 #define MX21ADS_REG_ADDR(offset)    (void __force __iomem *) \
                (MX21ADS_MMIO_BASE_ADDR + (offset))
index 967ed5b35a45914b3e26678db38bc0aaf0868faa..a8983b9778d1b7873dd850076718eabd690ca6f1 100644 (file)
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
 
 void __init imx3_init_l2x0(void)
 {
+#ifdef CONFIG_CACHE_L2X0
        void __iomem *l2x0_base;
        void __iomem *clkctl_base;
 
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void)
        }
 
        l2x0_init(l2x0_base, 0x00030024, 0x00000000);
+#endif
 }
 
 #ifdef CONFIG_SOC_IMX31
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void)
        mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
        mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
 
+       pinctrl_provide_dummies();
+
        if (to_version == 1) {
                strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
                        strlen(imx31_sdma_pdata.fw_name));
index feeee17da96b227b769c1748c6175b6071b8bd59..1d003053d5621bd45341030b9f58db08f78990ac 100644 (file)
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void)
        mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
        mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
 
+       pinctrl_provide_dummies();
+
        /* i.mx51 has the i.mx35 type sdma */
        imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
 
index ebbd7fc90eb47488b320de65feee82ce578ff288..a9f80943d01fe8b468a0cba2a78cf8f05a51cac7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/clockchips.h>
 #include <linux/io.h>
 #include <linux/export.h>
+#include <linux/gpio.h>
 
 #include <mach/udc.h>
 #include <mach/hardware.h>
@@ -107,7 +108,7 @@ static signed char irq2gpio[32] = {
         7,  8,  9, 10, 11, 12, -1, -1,
 };
 
-int gpio_to_irq(int gpio)
+static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
        int irq;
 
@@ -117,7 +118,6 @@ int gpio_to_irq(int gpio)
        }
        return -EINVAL;
 }
-EXPORT_SYMBOL(gpio_to_irq);
 
 int irq_to_gpio(unsigned int irq)
 {
@@ -383,12 +383,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
 unsigned long ixp4xx_exp_bus_size;
 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
 
+static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+       gpio_line_config(gpio, IXP4XX_GPIO_IN);
+
+       return 0;
+}
+
+static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+                                       int level)
+{
+       gpio_line_set(gpio, level);
+       gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+
+       return 0;
+}
+
+static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+       int value;
+
+       gpio_line_get(gpio, &value);
+
+       return value;
+}
+
+static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+                                 int value)
+{
+       gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip ixp4xx_gpio_chip = {
+       .label                  = "IXP4XX_GPIO_CHIP",
+       .direction_input        = ixp4xx_gpio_direction_input,
+       .direction_output       = ixp4xx_gpio_direction_output,
+       .get                    = ixp4xx_gpio_get_value,
+       .set                    = ixp4xx_gpio_set_value,
+       .to_irq                 = ixp4xx_gpio_to_irq,
+       .base                   = 0,
+       .ngpio                  = 16,
+};
+
 void __init ixp4xx_sys_init(void)
 {
        ixp4xx_exp_bus_size = SZ_16M;
 
        platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
 
+       gpiochip_add(&ixp4xx_gpio_chip);
+
        if (cpu_is_ixp46x()) {
                int region;
 
index 83d6b4ed60bbd42f3e912d68611205d720ad98a1..ef37f2635b0e4a5812ab30ce065525e3853a44d8 100644 (file)
@@ -1,79 +1,2 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gpio.h
- *
- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
-#define __ASM_ARCH_IXP4XX_GPIO_H
-
-#include <linux/kernel.h>
-#include <mach/hardware.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-       return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-       might_sleep();
-
-       return;
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
-       gpio_line_config(gpio, IXP4XX_GPIO_IN);
-       return 0;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int level)
-{
-       gpio_line_set(gpio, level);
-       gpio_line_config(gpio, IXP4XX_GPIO_OUT);
-       return 0;
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       int value;
-
-       gpio_line_get(gpio, &value);
-
-       return value;
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       gpio_line_set(gpio, value);
-}
-
-#include <asm-generic/gpio.h>                  /* cansleep wrappers */
-
-extern int gpio_to_irq(int gpio);
-#define gpio_to_irq gpio_to_irq
-extern int irq_to_gpio(unsigned int irq);
-
-#endif
+/* empty */
 
index 3d742aee177304250d78aa775d69475364a94866..108a9d3f382da148ff3b6cf281440ab4a302d1c4 100644 (file)
@@ -60,8 +60,6 @@ static struct platform_device ixdp425_flash = {
 #if defined(CONFIG_MTD_NAND_PLATFORM) || \
     defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ixdp425_partitions[] = {
        {
                .name   = "ixp400 NAND FS 0",
@@ -100,8 +98,6 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
        .chip = {
                .nr_chips               = 1,
                .chip_delay             = 30,
-               .options                = NAND_NO_AUTOINCR,
-               .part_probe_types       = part_probes,
                .partitions             = ixdp425_partitions,
                .nr_partitions          = ARRAY_SIZE(ixdp425_partitions),
        },
index 2222c57395198f6535d49cc8a2122e756cbba7b2..b0d3cc49269def6d8ab668af574fd8341ec5aca8 100644 (file)
@@ -20,9 +20,6 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
index 25fb3fd418efbe7e30b94136d920fd851e371de8..f261cd2426434aac74e2856c6069c9052d6f035f 100644 (file)
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
        gate_fn->gate.flags = clk_gate_flags;
        gate_fn->gate.lock = lock;
        gate_fn->gate.hw.init = &init;
+       gate_fn->fn = fn;
 
        /* ops is the gate ops, but with our disable function */
        if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name,
                                    bit_idx, 0, &gating_lock, fn);
 }
 
+static struct clk *ge0, *ge1;
+
 void __init kirkwood_clk_init(void)
 {
-       struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+       struct clk *runit, *sata0, *sata1, *usb0, *sdio;
        struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
 
        tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
                        IRQ_KIRKWOOD_GE00_ERR);
+       /* The interface forgets the MAC address assigned by u-boot if
+        * the clock is turned off, so claim the clk now. */
+       clk_prepare_enable(ge0);
 }
 
 
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
                        IRQ_KIRKWOOD_GE01_ERR);
+       clk_prepare_enable(ge1);
 }
 
 
index 3eee37a3b501a81e594fed45997301ae27f67ce4..a115142f8690bedf3bde2f10ad6f868b400a85dd 100644 (file)
@@ -38,6 +38,7 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #define L2_CONFIG_REG          (BRIDGE_VIRT_BASE | 0x0128)
 #define L2_WRITETHROUGH                0x00000010
index fede3d503efa0ef505b910494b4c96b5a9fa496d..c5b68510776b71c75c2731fcf5253a4114597c30 100644 (file)
@@ -80,6 +80,7 @@
 #define  UART1_VIRT_BASE       (DEV_BUS_VIRT_BASE | 0x2100)
 
 #define BRIDGE_VIRT_BASE       (KIRKWOOD_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x20000)
 
 #define CRYPTO_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x30000)
 
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644 (file)
index 0e135a5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio)     ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
index fcfe0e3bd7016631b5086cea80279b1d4db4a889..e60c7d98922b9b7876f561cc572a7b90bde28d73 100644 (file)
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void)
        icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
        icu_data[1].nr_irqs = 2;
+       icu_data[1].cascade_irq = 4;
        icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
                                                   icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void)
        icu_data[2].reg_status = mmp_icu_base + 0x154;
        icu_data[2].reg_mask = mmp_icu_base + 0x16c;
        icu_data[2].nr_irqs = 2;
+       icu_data[2].cascade_irq = 5;
        icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
        icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
                                                   icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void)
        icu_data[3].reg_status = mmp_icu_base + 0x180;
        icu_data[3].reg_mask = mmp_icu_base + 0x17c;
        icu_data[3].nr_irqs = 3;
+       icu_data[3].cascade_irq = 9;
        icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
        icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
                                                   icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void)
        icu_data[4].reg_status = mmp_icu_base + 0x158;
        icu_data[4].reg_mask = mmp_icu_base + 0x170;
        icu_data[4].nr_irqs = 5;
+       icu_data[4].cascade_irq = 17;
        icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
        icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
                                                   icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void)
        icu_data[5].reg_status = mmp_icu_base + 0x15c;
        icu_data[5].reg_mask = mmp_icu_base + 0x174;
        icu_data[5].nr_irqs = 15;
+       icu_data[5].cascade_irq = 35;
        icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
        icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
                                                   icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void)
        icu_data[6].reg_status = mmp_icu_base + 0x160;
        icu_data[6].reg_mask = mmp_icu_base + 0x178;
        icu_data[6].nr_irqs = 2;
+       icu_data[6].cascade_irq = 51;
        icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
        icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
                                                   icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void)
        icu_data[7].reg_status = mmp_icu_base + 0x188;
        icu_data[7].reg_mask = mmp_icu_base + 0x184;
        icu_data[7].nr_irqs = 2;
+       icu_data[7].cascade_irq = 55;
        icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
        icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
                                                   icu_data[7].virq_base, 0,
index c64dbb96dbad53a4264b8559ec531f229b75d08b..eb187e0e059bdbb1459b2ae21d69b6050176cba2 100644 (file)
@@ -31,5 +31,6 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index 3674497162e3efa3b2238360fda132e9761a4d21..e807c4c52a0b6331a4e02146f71edc127d95cb7f 100644 (file)
@@ -42,6 +42,7 @@
 #define MV78XX0_CORE0_REGS_PHYS_BASE   0xf1020000
 #define MV78XX0_CORE1_REGS_PHYS_BASE   0xf1024000
 #define MV78XX0_CORE_REGS_VIRT_BASE    0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE    0xfe400000
 #define MV78XX0_CORE_REGS_SIZE         SZ_16K
 
 #define MV78XX0_PCIE_IO_PHYS_BASE(i)   (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
  * Core-specific peripheral registers.
  */
 #define BRIDGE_VIRT_BASE       (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE       (MV78XX0_CORE_REGS_PHYS_BASE)
 
 /*
  * Register Map
index 5e90b9dcdef8789e10d71d2a8500604d53e57eeb..f5f061757deb54371e599730d8f38fc0b15c100e 100644 (file)
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
        return 0;
 }
 
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+       struct clk *clk;
+
+       /* Enable fec phy clock */
+       clk = clk_get_sys("enet_out", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static void __init apx4devkit_init(void)
 {
        mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
        phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
                        apx4devkit_phy_fixup);
 
+       apx4devkit_fec_phy_clk_enable();
        mx28_add_fec(0, &mx28_fec_pdata);
 
        mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
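The apx4devkit addition looks the clock up by its connection id and only enables it when the lookup succeeds, so configurations without an "enet_out" clock are unaffected. A standalone toy of that tolerant lookup-then-enable pattern (the real code tests !IS_ERR(clk); the toy uses a NULL return instead):

#include <stdio.h>
#include <string.h>

struct toy_clk { const char *name; int enabled; };

static struct toy_clk enet_out = { "enet_out", 0 };

/* Toy lookup: returns NULL when the clock is unknown (the kernel returns an error pointer). */
static struct toy_clk *toy_clk_get_sys(const char *name)
{
        if (name && strcmp(name, "enet_out") == 0)
                return &enet_out;
        return NULL;
}

static void toy_clk_prepare_enable(struct toy_clk *clk)
{
        clk->enabled = 1;
}

/* Mirror of the board hook: enable the PHY clock only if the lookup worked. */
static void fec_phy_clk_enable(void)
{
        struct toy_clk *clk = toy_clk_get_sys("enet_out");

        if (clk)                /* the kernel code checks !IS_ERR(clk) instead */
                toy_clk_prepare_enable(clk);
}

int main(void)
{
        fec_phy_clk_enable();
        printf("enet_out enabled = %d\n", enet_out.enabled);
        return 0;
}
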
index 58cacafcf6628758a27623abb8f8be1b1b240317..2e8d3e176bc70b7216b5b8aa818b648ae63ebee0 100644 (file)
@@ -111,7 +111,7 @@ static struct nomadik_nand_platform_data nhk8815_nand_data = {
        .parts          = nhk8815_partitions,
        .nparts         = ARRAY_SIZE(nhk8815_partitions),
        .options        = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \
-                       | NAND_NO_READRDY | NAND_NO_AUTOINCR,
+                       | NAND_NO_READRDY,
        .init           = nhk8815_nand_init,
 };
 
index c7364fdbda05370f05a84277cbcf3b494daf897d..6872f3fd400ffd242029b67f8688fda2853b6899 100644 (file)
@@ -192,14 +192,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
        .chip   = {
                .nr_chips               = 1,
                .chip_offset            = 0,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
index 7e503686f7af2b2635698c112f350ffd6ce72dd8..a28e989a63f4369f94cd0081e11cb4f28f407215 100644 (file)
@@ -186,8 +186,6 @@ static int h2_nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(H2_NAND_RB_GPIO_PIN);
 }
 
-static const char *h2_part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data h2_nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -195,7 +193,6 @@ static struct platform_nand_data h2_nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(h2_nand_partitions),
                .partitions             = h2_nand_partitions,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = h2_part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
index 9fb03f189d934bce14ce17f37cf53f99b3551472..108a8640fc6f04c2c42c0accc0d6602716831ef0 100644 (file)
@@ -188,8 +188,6 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(H3_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -197,7 +195,6 @@ static struct platform_nand_data nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(nand_partitions),
                .partitions             = nand_partitions,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
index f2cb24387c229d059a4a3af3dcffef386540f7d6..703d55ecffe2b2411c77af5d262a3372e5431a86 100644 (file)
@@ -150,14 +150,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(P2_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
        .chip   = {
                .nr_chips               = 1,
                .chip_offset            = 0,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
index 70a81f900bb5514d813c9f57c38ca64656798c4b..53c39d239d6e202c896ff9390eedfc0c24e60647 100644 (file)
@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
 
        gpmc_onenand_init(&board_onenand_data);
 }
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
 #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || \
index 8ca14e88a31af12f43d219fe6d8a6b63b61ec4f1..2c5d0ed75285153d8126e60a030f08630a277635 100644 (file)
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
-       .mode           = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
+#else
        .mode           = MUSB_HOST,
 #endif
        .set_power      = tusb_set_power,
index 79c6909eeb785ef24d71be68f076034c0cd476df..580fd17208dacc24dd51bbff63507763c0612730 100644 (file)
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version;
 static struct {
        int mmc1_gpio_wp;
        int usb_pwr_level;
-       int reset_gpio;
+       int dvi_pd_gpio;
        int usr_button_gpio;
        int mmc_caps;
 } beagle_config = {
        .mmc1_gpio_wp = -EINVAL,
        .usb_pwr_level = GPIOF_OUT_INIT_LOW,
-       .reset_gpio = 129,
+       .dvi_pd_gpio = -EINVAL,
        .usr_button_gpio = 4,
        .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
 };
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void)
                printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
                beagle_config.mmc1_gpio_wp = 29;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 6:
                printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 5:
                printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 0:
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (r)
                        pr_err("%s: unable to configure nDVI_PWR_EN\n",
                                __func__);
-               r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
-                                    "DVI_LDO_EN");
-               if (r)
-                       pr_err("%s: unable to configure DVI_LDO_EN\n",
-                               __func__);
+
+               beagle_config.dvi_pd_gpio = gpio + 2;
+
        } else {
                /*
                 * REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
                        pr_err("%s: unable to configure EHCI_nOC\n", __func__);
        }
-       dvi_panel.power_down_gpio = beagle_config.reset_gpio;
+       dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
 
        gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
                        "nEN_USB_PWR");
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void)
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        omap3_beagle_init_rev();
 
-       if (beagle_config.mmc1_gpio_wp != -EINVAL)
+       if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
                omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
        mmc[0].caps = beagle_config.mmc_caps;
        omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void)
 
        platform_add_devices(omap3_beagle_devices,
                        ARRAY_SIZE(omap3_beagle_devices));
+       if (gpio_is_valid(beagle_config.dvi_pd_gpio))
+               omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
        omap_display_init(&beagle_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
                                  mt46h32m32lf6_sdrc_params);
 
-       omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-       /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
        usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
        omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
index 8fa2fc3a4c3c51e973eb07c91f3c687c86aca62a..779734d8ba37304417350cd246c52044a0238897 100644 (file)
@@ -494,8 +494,8 @@ static void __init overo_init(void)
 
        regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-       omap_hsmmc_init(mmc);
        overo_i2c_init();
+       omap_hsmmc_init(mmc);
        omap_display_init(&overo_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
index ff53deccecab09c588b83b663dc5cb781d278c2b..df2534de3361d5b9c2317db5cec7222e5993d2fb 100644 (file)
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
        .release_resources = lis302_release,
        .st_min_limits = {-32, 3, 3},
        .st_max_limits = {-3, 32, 32},
-       .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
 };
 #endif
 
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
        {
                I2C_BOARD_INFO("lis3lv02d", 0x1d),
                .platform_data = &rx51_lis3lv02d_data,
-               .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
        },
 #endif
 };
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void)
        omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
        omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
+       rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
+       rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
+#endif
        omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
        return 0;
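The rx51 change stops baking IRQ numbers in with the OMAP_GPIO_IRQ() macro and instead resolves them at init time with gpio_to_irq(), filling the board-info and platform-data fields just before the I2C bus is registered. A toy sketch of that resolve-late pattern (the GPIO number and the mapping offset below are invented):

#include <stdio.h>

/* Toy mapping in the spirit of gpio_to_irq(): resolved at run time. */
static int toy_gpio_to_irq(unsigned gpio)
{
        return 160 + (int)gpio;         /* invented offset, not the real OMAP mapping */
}

struct toy_i2c_board_info {
        const char *type;
        int irq;                        /* filled in just before registration */
};

static struct toy_i2c_board_info accel_info = { .type = "lis3lv02d", .irq = 0 };

static void board_i2c_init(void)
{
        /* resolve the GPIO-backed IRQ at init time, as the hunk above now does */
        accel_info.irq = toy_gpio_to_irq(42);
        printf("%s irq = %d\n", accel_info.type, accel_info.irq);
}

int main(void)
{
        board_i2c_init();
        return 0;
}
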
index 4e1a3b0e8cc83d5d505abf7d789273c4f1a970a0..1efdec236ae89dac6671bbd8f09d7cffb2067deb 100644 (file)
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void)
        struct omap_clk *c;
        u32 cpu_clkflg = 0;
 
-       if (cpu_is_omap3517()) {
+       if (soc_is_am35xx()) {
                cpu_mask = RATE_IN_34XX;
                cpu_clkflg = CK_AM35XX;
        } else if (cpu_is_omap3630()) {
index 2172f660384889535c6d812dae30dacf56b18eec..ba6f9a0a43e9096776963ea1285eea34fb1a24f8 100644 (file)
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = {
 
 static struct clk sys_32k_ck = {
        .name           = "sys_32k_ck",
+       .clkdm_name     = "prm_clkdm",
        .rate           = 32768,
        .ops            = &clkops_null,
 };
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = {
        .name           = "ddrphy_ck",
        .parent         = &dpll_core_m2_ck,
        .ops            = &clkops_null,
+       .clkdm_name     = "l3_emif_clkdm",
        .fixed_div      = 2,
        .recalc         = &omap_fixed_divisor_recalc,
 };
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = {
 static struct clk dpll_mpu_m2_ck = {
        .name           = "dpll_mpu_m2_ck",
        .parent         = &dpll_mpu_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = dpll_mpu_m2_div,
        .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_MPU,
        .clksel_mask    = OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = {
 static struct clk l3_div_ck = {
        .name           = "l3_div_ck",
        .parent         = &div_core_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = l3_div_div,
        .clksel_reg     = OMAP4430_CM_CLKSEL_CORE,
        .clksel_mask    = OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = {
 static struct clk trace_clk_div_ck = {
        .name           = "trace_clk_div_ck",
        .parent         = &pmd_trace_clk_mux_ck,
+       .clkdm_name     = "emu_sys_clkdm",
        .clksel         = trace_clk_div_div,
        .clksel_reg     = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
@@ -3412,9 +3417,12 @@ int __init omap4xxx_clk_init(void)
        if (cpu_is_omap443x()) {
                cpu_mask = RATE_IN_4430;
                cpu_clkflg = CK_443X;
-       } else if (cpu_is_omap446x()) {
+       } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
                cpu_mask = RATE_IN_4460 | RATE_IN_4430;
                cpu_clkflg = CK_446X | CK_443X;
+
+               if (cpu_is_omap447x())
+                       pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
        } else {
                return 0;
        }
index f7b58609bad888b6b276a524badf9518e450a709..6227e9505c2db6858c44bf2b9f30e0abe7ec0023 100644 (file)
  *
  * CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
  *     clockdomain.  (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ *     active whenever the MPU is active.  True for interconnects and
+ *     the WKUP clockdomains.
  */
 #define CLKDM_CAN_FORCE_SLEEP                  (1 << 0)
 #define CLKDM_CAN_FORCE_WAKEUP                 (1 << 1)
 #define CLKDM_CAN_ENABLE_AUTO                  (1 << 2)
 #define CLKDM_CAN_DISABLE_AUTO                 (1 << 3)
 #define CLKDM_NO_AUTODEPS                      (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU                  (1 << 5)
 
 #define CLKDM_CAN_HWSUP                (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
 #define CLKDM_CAN_SWSUP                (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
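
The new CLKDM_ACTIVE_WITH_MPU bit is consumed further down, in the _enable_sysc() hunk of omap_hwmod.c. As a rough, self-contained illustration of how these single-bit capability flags are combined and tested, here is a small C sketch; the numeric flag values mirror the defines above, but the clockdomain structure is a simplified stand-in for the kernel's struct clockdomain, not the real thing.

    #include <stdio.h>

    /* Flag values copied from the defines above. */
    #define CLKDM_CAN_FORCE_SLEEP   (1 << 0)
    #define CLKDM_CAN_FORCE_WAKEUP  (1 << 1)
    #define CLKDM_CAN_ENABLE_AUTO   (1 << 2)
    #define CLKDM_CAN_DISABLE_AUTO  (1 << 3)
    #define CLKDM_NO_AUTODEPS       (1 << 4)
    #define CLKDM_ACTIVE_WITH_MPU   (1 << 5)

    #define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)

    /* Simplified, hypothetical stand-in for struct clockdomain. */
    struct clkdm {
            const char *name;
            unsigned int flags;
    };

    static int clkdm_active_with_mpu(const struct clkdm *clkdm)
    {
            /* A clockdomain guaranteed active whenever the MPU runs. */
            return !!(clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
    }

    int main(void)
    {
            struct clkdm wkup = {
                    .name  = "wkup_clkdm",
                    .flags = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
            };

            printf("%s: active with MPU = %d\n",
                   wkup.name, clkdm_active_with_mpu(&wkup));
            return 0;
    }
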
index 839145e1cfbea2bcf706e72be39da268e6778247..4972219653ce85b1bebcad5629ab92d733dfef45 100644 (file)
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
+       .flags          = CLKDM_ACTIVE_WITH_MPU,
 };
index c534258474939e8efe3b96dd1c5ed096bd30f3ab..7f2133abe7d36e601c322fcec0a8f6217b500b46 100644 (file)
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
        .cm_inst          = OMAP4430_PRM_WKUP_CM_INST,
        .clkdm_offs       = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
        .dep_bit          = OMAP4430_L4WKUP_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP,
+       .flags            = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
 };
 
 static struct clockdomain emu_sys_44xx_clkdm = {
index a7bc096bd407445ed8e248c6c9803435ddc406eb..f24e3f7a2bbc248389ac234c4b80e9f2ef4d5e03 100644 (file)
  */
 #define MAX_MODULE_READY_TIME          2000
 
+/*
+ * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
+ * the PRCM to request that a module enter the inactive state in the
+ * case of OMAP2 & 3.  In the case of OMAP4 this is the max duration
+ * in microseconds for the module to reach the inactive state from
+ * a functional state.
+ * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
+ * kernel init.
+ */
+#define MAX_MODULE_DISABLE_TIME                5000
+
 #endif
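
MAX_MODULE_DISABLE_TIME is consumed in the next hunk by omap4_cminst_wait_module_idle() via the omap_test_timeout() helper. The standalone sketch below shows only the generic poll-until-condition-or-timeout pattern that such a helper provides; it is not the kernel macro, and the per-iteration microsecond delay is deliberately left out, so the loop index merely stands in for elapsed time.

    #include <stdio.h>

    #define MAX_MODULE_DISABLE_TIME  5000   /* microseconds, as above */

    /*
     * Hypothetical stand-in for the hardware IDLEST read; a real driver
     * would read a CM_*_CLKCTRL register here.
     */
    static int module_is_disabled(int elapsed_us)
    {
            return elapsed_us >= 1200;  /* pretend the module idles after ~1.2 ms */
    }

    int main(void)
    {
            int i;

            /* Poll the condition up to the limit, then report failure. */
            for (i = 0; i < MAX_MODULE_DISABLE_TIME; i++)
                    if (module_is_disabled(i))
                            break;

            if (i < MAX_MODULE_DISABLE_TIME)
                    printf("module idled after ~%d us\n", i);
            else
                    printf("timed out after %d us\n", MAX_MODULE_DISABLE_TIME);

            return 0;
    }
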
index 8c86d294b1a326b6ea8bbf07239cb92983ba5f50..1a39945d9ff81fd5ed9a78f785c6fe08d3f1f939 100644 (file)
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off
 
        omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
-                         MAX_MODULE_READY_TIME, i);
+                         MAX_MODULE_DISABLE_TIME, i);
 
-       return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+       return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
 }
 
 /**
index db5a88a36c63418d746e6fbfd6ecb89d97e2c2e5..5fb47a14f4ba85211010afa34c439a3f1c73b30c 100644 (file)
@@ -180,16 +180,133 @@ static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
                omap4_dsi_mux_pads(dsi_id, 0);
 }
 
+static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+       return omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, tput);
+}
+
+static struct platform_device *create_dss_pdev(const char *pdev_name,
+               int pdev_id, const char *oh_name, void *pdata, int pdata_len,
+               struct platform_device *parent)
+{
+       struct platform_device *pdev;
+       struct omap_device *od;
+       struct omap_hwmod *ohs[1];
+       struct omap_hwmod *oh;
+       int r;
+
+       oh = omap_hwmod_lookup(oh_name);
+       if (!oh) {
+               pr_err("Could not look up %s\n", oh_name);
+               r = -ENODEV;
+               goto err;
+       }
+
+       pdev = platform_device_alloc(pdev_name, pdev_id);
+       if (!pdev) {
+               pr_err("Could not create pdev for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       if (parent != NULL)
+               pdev->dev.parent = &parent->dev;
+
+       if (pdev->id != -1)
+               dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+       else
+               dev_set_name(&pdev->dev, "%s", pdev->name);
+
+       ohs[0] = oh;
+       od = omap_device_alloc(pdev, ohs, 1, NULL, 0);
+       if (!od) {
+               pr_err("Could not alloc omap_device for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       r = platform_device_add_data(pdev, pdata, pdata_len);
+       if (r) {
+               pr_err("Could not set pdata for %s\n", pdev_name);
+               goto err;
+       }
+
+       r = omap_device_register(pdev);
+       if (r) {
+               pr_err("Could not register omap_device for %s\n", pdev_name);
+               goto err;
+       }
+
+       return pdev;
+
+err:
+       return ERR_PTR(r);
+}
+
+static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
+               int pdev_id, void *pdata, int pdata_len,
+               struct platform_device *parent)
+{
+       struct platform_device *pdev;
+       int r;
+
+       pdev = platform_device_alloc(pdev_name, pdev_id);
+       if (!pdev) {
+               pr_err("Could not create pdev for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       if (parent != NULL)
+               pdev->dev.parent = &parent->dev;
+
+       if (pdev->id != -1)
+               dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+       else
+               dev_set_name(&pdev->dev, "%s", pdev->name);
+
+       r = platform_device_add_data(pdev, pdata, pdata_len);
+       if (r) {
+               pr_err("Could not set pdata for %s\n", pdev_name);
+               goto err;
+       }
+
+       r = platform_device_add(pdev);
+       if (r) {
+               pr_err("Could not register platform_device for %s\n", pdev_name);
+               goto err;
+       }
+
+       return pdev;
+
+err:
+       return ERR_PTR(r);
+}
+
 int __init omap_display_init(struct omap_dss_board_info *board_data)
 {
        int r = 0;
-       struct omap_hwmod *oh;
        struct platform_device *pdev;
        int i, oh_count;
-       struct omap_display_platform_data pdata;
        const struct omap_dss_hwmod_data *curr_dss_hwmod;
+       struct platform_device *dss_pdev;
+
+       /* create omapdss device */
+
+       board_data->dsi_enable_pads = omap_dsi_enable_pads;
+       board_data->dsi_disable_pads = omap_dsi_disable_pads;
+       board_data->get_context_loss_count = omap_pm_get_dev_context_loss_count;
+       board_data->set_min_bus_tput = omap_dss_set_min_bus_tput;
+
+       omap_display_device.dev.platform_data = board_data;
+
+       r = platform_device_register(&omap_display_device);
+       if (r < 0) {
+               pr_err("Unable to register omapdss device\n");
+               return r;
+       }
 
-       memset(&pdata, 0, sizeof(pdata));
+       /* create devices for dss hwmods */
 
        if (cpu_is_omap24xx()) {
                curr_dss_hwmod = omap2_dss_hwmod_data;
@@ -202,39 +319,58 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
                oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
        }
 
-       if (board_data->dsi_enable_pads == NULL)
-               board_data->dsi_enable_pads = omap_dsi_enable_pads;
-       if (board_data->dsi_disable_pads == NULL)
-               board_data->dsi_disable_pads = omap_dsi_disable_pads;
-
-       pdata.board_data = board_data;
-       pdata.board_data->get_context_loss_count =
-               omap_pm_get_dev_context_loss_count;
-
-       for (i = 0; i < oh_count; i++) {
-               oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
-               if (!oh) {
-                       pr_err("Could not look up %s\n",
-                               curr_dss_hwmod[i].oh_name);
-                       return -ENODEV;
+       /*
+        * First create the pdev for dss_core, which is used as a parent device
+        * by the other dss pdevs. Note: dss_core has to be the first item in
+        * the hwmod list.
+        */
+       dss_pdev = create_dss_pdev(curr_dss_hwmod[0].dev_name,
+                       curr_dss_hwmod[0].id,
+                       curr_dss_hwmod[0].oh_name,
+                       board_data, sizeof(*board_data),
+                       NULL);
+
+       if (IS_ERR(dss_pdev)) {
+               pr_err("Could not build omap_device for %s\n",
+                               curr_dss_hwmod[0].oh_name);
+
+               return PTR_ERR(dss_pdev);
+       }
+
+       for (i = 1; i < oh_count; i++) {
+               pdev = create_dss_pdev(curr_dss_hwmod[i].dev_name,
+                               curr_dss_hwmod[i].id,
+                               curr_dss_hwmod[i].oh_name,
+                               board_data, sizeof(*board_data),
+                               dss_pdev);
+
+               if (IS_ERR(pdev)) {
+                       pr_err("Could not build omap_device for %s\n",
+                                       curr_dss_hwmod[i].oh_name);
+
+                       return PTR_ERR(pdev);
                }
+       }
 
-               pdev = omap_device_build(curr_dss_hwmod[i].dev_name,
-                               curr_dss_hwmod[i].id, oh, &pdata,
-                               sizeof(struct omap_display_platform_data),
-                               NULL, 0, 0);
+       /* Create devices for DPI and SDI */
 
-               if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n",
-                               curr_dss_hwmod[i].oh_name))
-                       return -ENODEV;
+       pdev = create_simple_dss_pdev("omapdss_dpi", -1,
+                       board_data, sizeof(*board_data), dss_pdev);
+       if (IS_ERR(pdev)) {
+               pr_err("Could not build platform_device for omapdss_dpi\n");
+               return PTR_ERR(pdev);
        }
-       omap_display_device.dev.platform_data = board_data;
 
-       r = platform_device_register(&omap_display_device);
-       if (r < 0)
-               printk(KERN_ERR "Unable to register OMAP-Display device\n");
+       if (cpu_is_omap34xx()) {
+               pdev = create_simple_dss_pdev("omapdss_sdi", -1,
+                               board_data, sizeof(*board_data), dss_pdev);
+               if (IS_ERR(pdev)) {
+                       pr_err("Could not build platform_device for omapdss_sdi\n");
+                       return PTR_ERR(pdev);
+               }
+       }
 
-       return r;
+       return 0;
 }
 
 static void dispc_disable_outputs(void)
index 845309f146fe317fd82ec74dcc39a1158ce6d0aa..88ffa1e645cd948614afe61c17502533c64650a7 100644 (file)
@@ -20,6 +20,9 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+
+#include <asm/memblock.h>
+
 #include "cm2xxx_3xxx.h"
 #include "prm2xxx_3xxx.h"
 #ifdef CONFIG_BRIDGE_DVFS
index 46b09dae770ebadb7ae85c2568786a3743e9460b..2286410671e7e6ecded10796c3d268ea92863b8c 100644 (file)
@@ -49,6 +49,7 @@
 #define GPMC_ECC_CONTROL       0x1f8
 #define GPMC_ECC_SIZE_CONFIG   0x1fc
 #define GPMC_ECC1_RESULT        0x200
+#define GPMC_ECC_BCH_RESULT_0   0x240   /* not available on OMAP2 */
 
 /* GPMC ECC control settings */
 #define GPMC_ECC_CTRL_ECCCLEAR         0x100
@@ -935,3 +936,186 @@ int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
        return 0;
 }
 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc);
+
+#ifdef CONFIG_ARCH_OMAP3
+
+/**
+ * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
+ * @cs: chip select number
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ *
+ * This function must be executed before any call to gpmc_enable_hwecc_bch.
+ */
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors)
+{
+       /* check if ecc module is in use */
+       if (gpmc_ecc_used != -EINVAL)
+               return -EINVAL;
+
+       /* support only OMAP3 class */
+       if (!cpu_is_omap34xx()) {
+               printk(KERN_ERR "BCH ecc is not supported on this CPU\n");
+               return -EINVAL;
+       }
+
+       /*
+        * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
+        * Other chips may be added if confirmed to work.
+        */
+       if ((nerrors == 4) &&
+           (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
+               printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n");
+               return -EINVAL;
+       }
+
+       /* sanity check */
+       if (nsectors > 8) {
+               printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n",
+                      nsectors);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch);
+
+/**
+ * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
+ * @cs: chip select number
+ * @mode: read/write mode
+ * @dev_width: device bus width (1 for x16, 0 for x8)
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ */
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+                         int nerrors)
+{
+       unsigned int val;
+
+       /* check if ecc module is in use */
+       if (gpmc_ecc_used != -EINVAL)
+               return -EINVAL;
+
+       gpmc_ecc_used = cs;
+
+       /* clear ecc and enable bits */
+       gpmc_write_reg(GPMC_ECC_CONTROL, 0x1);
+
+       /*
+        * When using BCH, sector size is hardcoded to 512 bytes.
+        * Here we are using wrapping mode 6 both for reading and writing, with:
+        *  size0 = 0  (no additional protected byte in spare area)
+        *  size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+        */
+       gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12));
+
+       /* BCH configuration */
+       val = ((1                        << 16) | /* enable BCH */
+              (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
+              (0x06                     <<  8) | /* wrap mode = 6 */
+              (dev_width                <<  7) | /* bus width */
+              (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
+              (cs                       <<  1) | /* ECC CS */
+              (0x1));                            /* enable ECC */
+
+       gpmc_write_reg(GPMC_ECC_CONFIG, val);
+       gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch);
+
+/**
+ * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc)
+{
+       int i;
+       unsigned long nsectors, reg, val1, val2;
+
+       if (gpmc_ecc_used != cs)
+               return -EINVAL;
+
+       nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+       for (i = 0; i < nsectors; i++) {
+
+               reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+               /* Read hw-computed remainder */
+               val1 = gpmc_read_reg(reg + 0);
+               val2 = gpmc_read_reg(reg + 4);
+
+               /*
+                * Add constant polynomial to remainder, in order to get an ecc
+                * sequence of 0xFFs for a buffer filled with 0xFFs; and
+                * left-justify the resulting polynomial.
+                */
+               *ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF);
+               *ecc++ = 0x13 ^ ((val2 >>  4) & 0xFF);
+               *ecc++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
+               *ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF);
+               *ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF);
+               *ecc++ = 0xac ^ ((val1 >> 4) & 0xFF);
+               *ecc++ = 0x7f ^ ((val1 & 0xF) << 4);
+       }
+
+       gpmc_ecc_used = -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4);
+
+/**
+ * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc)
+{
+       int i;
+       unsigned long nsectors, reg, val1, val2, val3, val4;
+
+       if (gpmc_ecc_used != cs)
+               return -EINVAL;
+
+       nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+       for (i = 0; i < nsectors; i++) {
+
+               reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+               /* Read hw-computed remainder */
+               val1 = gpmc_read_reg(reg + 0);
+               val2 = gpmc_read_reg(reg + 4);
+               val3 = gpmc_read_reg(reg + 8);
+               val4 = gpmc_read_reg(reg + 12);
+
+               /*
+                * Add constant polynomial to remainder, in order to get an ecc
+                * sequence of 0xFFs for a buffer filled with 0xFFs.
+                */
+               *ecc++ = 0xef ^ (val4 & 0xFF);
+               *ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF);
+               *ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF);
+               *ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF);
+               *ecc++ = 0xed ^ (val3 & 0xFF);
+               *ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF);
+               *ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF);
+               *ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
+               *ecc++ = 0x97 ^ (val2 & 0xFF);
+               *ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF);
+               *ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
+               *ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF);
+               *ecc++ = 0xb5 ^ (val1 & 0xFF);
+       }
+
+       gpmc_ecc_used = -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8);
+
+#endif /* CONFIG_ARCH_OMAP3 */
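
The GPMC_ECC_CONFIG value built in gpmc_enable_hwecc_bch() above packs several fields into one 32-bit register. The standalone sketch below reproduces just that packing arithmetic so the field layout is easier to read; the bit positions are taken from the code above, and the register write itself is of course omitted.

    #include <stdio.h>

    /*
     * Mirror of the GPMC_ECC_CONFIG packing in gpmc_enable_hwecc_bch():
     *   bit 16      enable BCH
     *   bit 12      0 = BCH4, 1 = BCH8
     *   bits 11:8   wrap mode (6 here)
     *   bit 7       bus width (0 = x8, 1 = x16)
     *   bits 6:4    number of sectors - 1
     *   bits 3:1    chip select
     *   bit 0       enable ECC
     */
    static unsigned int gpmc_bch_config(int cs, int dev_width,
                                        int nsectors, int nerrors)
    {
            return (1u                       << 16) |
                   (((nerrors == 8) ? 1 : 0) << 12) |
                   (0x06                     <<  8) |
                   ((unsigned int)dev_width  <<  7) |
                   (((nsectors - 1) & 0x7)   <<  4) |
                   ((unsigned int)cs         <<  1) |
                   0x1;
    }

    int main(void)
    {
            /* Example: CS0, x16 NAND, 4 sectors (2 KiB page), BCH8. */
            printf("GPMC_ECC_CONFIG = 0x%08x\n", gpmc_bch_config(0, 1, 4, 8));
            return 0;
    }
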
index 0389b3264abe78fa65a978c78223a10574e42614..00486a8564fdace3131b4a8309bdeb067a16cece 100644 (file)
@@ -246,6 +246,17 @@ void __init omap3xxx_check_features(void)
 
        omap_features |= OMAP3_HAS_SDRC;
 
+       /*
+        * am35x fixups:
+        * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
+        *   reserved and therefore return 0 when read.  Unfortunately,
+        *   OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
+        *   mean that a feature is present even though it isn't, so clear
+        *   the incorrectly set feature bits.
+        */
+       if (soc_is_am35xx())
+               omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
+
        /*
         * TODO: Get additional info (where applicable)
         *       e.g. Size of L2 cache.
index fdc4303be563169dedbd458bd8391a16e43e252b..6038a8c84b743af0ed1f205af643f3a8a0013b63 100644 (file)
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        ct->chip.irq_ack = omap_mask_ack_irq;
        ct->chip.irq_mask = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+       ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
 
        ct->regs.enable = INTC_MIR_CLEAR0;
        ct->regs.disable = INTC_MIR_SET0;
index 80e55c5c99988c2bf63d49a63bf9b562cfaff036..9fe6829f4c16f2dd0837862f5f4254ecba98230b 100644 (file)
@@ -41,6 +41,7 @@
 #include "control.h"
 #include "mux.h"
 #include "prm.h"
+#include "common.h"
 
 #define OMAP_MUX_BASE_OFFSET           0x30    /* Offset from CTRL_BASE */
 #define OMAP_MUX_BASE_SZ               0x5ca
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
        return -ENODEV;
 }
 
-static int __init
-omap_mux_get_by_name(const char *muxname,
+int __init omap_mux_get_by_name(const char *muxname,
                        struct omap_mux_partition **found_partition,
                        struct omap_mux **found_mux)
 {
index 69fe060a0b755204875f42a9fd1e1ffc943736f9..471e62a74a166fb64a7486670a54854239cfb3b6 100644 (file)
@@ -59,6 +59,7 @@
 #define OMAP_PIN_OFF_WAKEUPENABLE      OMAP_WAKEUP_EN
 
 #define OMAP_MODE_GPIO(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_UART(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
 
 /* Flags for omapX_mux_init */
 #define OMAP_PACKAGE_MASK              0xffff
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
  */
 void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
 
+int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux);
 #else
 
+static inline int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux)
+{
+       return 0;
+}
+
 static inline int omap_mux_init_gpio(int gpio, int val)
 {
        return 0;
index bf86f7e8f91f5837869cb1b799f2b59ab4a81395..2d710f50fca2fafd9cac99d2f515220c41de410f 100644 (file)
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
        if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
                _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
        if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
-               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
 
        /* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
  * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
  * @oh: struct omap_hwmod *
  *
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle.  If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby.  No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes.  No return value.
  */
 static void _enable_sysc(struct omap_hwmod *oh)
 {
        u8 idlemode, sf;
        u32 v;
+       bool clkdm_act;
 
        if (!oh->class->sysc)
                return;
@@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               clkdm_act = ((oh->clkdm &&
+                             oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+                            (oh->_clk && oh->_clk->clkdm &&
+                             oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+               if (clkdm_act && !(oh->class->sysc->idlemodes &
+                                  (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   !(oh->class->sysc->idlemodes &
+                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
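
Distilled from the two hunks above, the slave-idle mode now depends on three inputs: whether the module's clockdomain is flagged CLKDM_ACTIVE_WITH_MPU, whether the IP block advertises a smart-idle variant at all, and whether the hwmod is marked HWMOD_SWSUP_SIDLE. A compact restatement of that decision, using placeholder flag values rather than the kernel's, is sketched below.

    #include <stdio.h>

    /* Placeholder flag values, for illustration only. */
    #define SIDLE_SMART           (1 << 0)
    #define SIDLE_SMART_WKUP      (1 << 1)
    #define HWMOD_SWSUP_SIDLE     (1 << 0)
    #define CLKDM_ACTIVE_WITH_MPU (1 << 0)

    enum idlemode { IDLEMODE_FORCE, IDLEMODE_NO, IDLEMODE_SMART };

    /* Idle-mode choice when enabling a module (mirrors _enable_sysc). */
    static enum idlemode enable_idlemode(unsigned int idlemodes,
                                         unsigned int hwmod_flags,
                                         unsigned int clkdm_flags)
    {
            int clkdm_act = clkdm_flags & CLKDM_ACTIVE_WITH_MPU;
            int has_smart = idlemodes & (SIDLE_SMART | SIDLE_SMART_WKUP);

            if (clkdm_act && !has_smart)
                    return IDLEMODE_FORCE;
            return (hwmod_flags & HWMOD_SWSUP_SIDLE) ?
                    IDLEMODE_NO : IDLEMODE_SMART;
    }

    /* Idle-mode choice when idling a module (mirrors _idle_sysc). */
    static enum idlemode idle_idlemode(unsigned int idlemodes,
                                       unsigned int hwmod_flags)
    {
            int has_smart = idlemodes & (SIDLE_SMART | SIDLE_SMART_WKUP);

            if ((hwmod_flags & HWMOD_SWSUP_SIDLE) || !has_smart)
                    return IDLEMODE_FORCE;
            return IDLEMODE_SMART;
    }

    int main(void)
    {
            /* e.g. the 32k counter: no smart-idle modes, wkup clkdm follows the MPU */
            printf("enable: %d, idle: %d\n",
                   enable_idlemode(0, 0, CLKDM_ACTIVE_WITH_MPU),
                   idle_idlemode(0, 0));
            return 0;
    }
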
 
index 950454a3fa314da4448c99eeaaf50f81201b382b..b7bcba5221ba260b31d0327b37442e52fb807143 100644 (file)
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0004,
        .sysc_flags     = SYSC_HAS_SIDLEMODE,
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          SIDLE_SMART_WKUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .name           = "dss_hdmi",
        .class          = &omap44xx_hdmi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
+       /*
+        * HDMI audio requires no-idle mode, so the
+        * idle mode is set by software.
+        */
+       .flags          = HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
        .main_clk       = "dss_48mhz_clk",
@@ -1924,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1959,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1994,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2029,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3860,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
 };
 
 /* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
        .master         = &omap44xx_usb_host_fs_hwmod,
        .slave          = &omap44xx_l3_main_2_hwmod,
        .clk            = "l3_div_ck",
@@ -3918,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
 };
 
 /* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
        .master         = &omap44xx_aess_hwmod,
        .slave          = &omap44xx_l4_abe_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4009,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
 };
 
 /* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4027,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
 };
 
 /* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -5853,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
 };
 
 /* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
        .master         = &omap44xx_l4_cfg_hwmod,
        .slave          = &omap44xx_usb_host_fs_hwmod,
        .clk            = "l4_div_ck",
@@ -6010,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_iva__l3_main_2,
        &omap44xx_l3_main_1__l3_main_2,
        &omap44xx_l4_cfg__l3_main_2,
-       &omap44xx_usb_host_fs__l3_main_2,
+       /* &omap44xx_usb_host_fs__l3_main_2, */
        &omap44xx_usb_host_hs__l3_main_2,
        &omap44xx_usb_otg_hs__l3_main_2,
        &omap44xx_l3_main_1__l3_main_3,
        &omap44xx_l3_main_2__l3_main_3,
        &omap44xx_l4_cfg__l3_main_3,
-       &omap44xx_aess__l4_abe,
+       /* &omap44xx_aess__l4_abe, */
        &omap44xx_dsp__l4_abe,
        &omap44xx_l3_main_1__l4_abe,
        &omap44xx_mpu__l4_abe,
@@ -6025,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__l4_wkup,
        &omap44xx_mpu__mpu_private,
        &omap44xx_l4_cfg__ocp_wp_noc,
-       &omap44xx_l4_abe__aess,
-       &omap44xx_l4_abe__aess_dma,
+       /* &omap44xx_l4_abe__aess, */
+       /* &omap44xx_l4_abe__aess_dma, */
        &omap44xx_l3_main_2__c2c,
        &omap44xx_l4_wkup__counter_32k,
        &omap44xx_l4_cfg__ctrl_module_core,
@@ -6132,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_per__uart2,
        &omap44xx_l4_per__uart3,
        &omap44xx_l4_per__uart4,
-       &omap44xx_l4_cfg__usb_host_fs,
+       /* &omap44xx_l4_cfg__usb_host_fs, */
        &omap44xx_l4_cfg__usb_host_hs,
        &omap44xx_l4_cfg__usb_otg_hs,
        &omap44xx_l4_cfg__usb_tll_hs,
index a05a62f9ee5b2e7bce17e9b89fc4ce6ff0f18d2b..acc216491b8a5908656354f7c2ba7ffbfce8944a 100644 (file)
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
        u8 multi = error & L3_ERROR_LOG_MULTI;
        u32 address = omap3_l3_decode_addr(error_addr);
 
-       WARN(true, "%s seen by %s %s at address %x\n",
+       pr_err("%s seen by %s %s at address %x\n",
                        omap3_l3_code_string(code),
                        omap3_l3_initiator_string(initid),
                        multi ? "Multiple Errors" : "", address);
+       WARN_ON(1);
 
        return IRQ_HANDLED;
 }
index 4c90477e6f82a91c5c4ddf140cd02cb718f69fc7..d52651a05daa6ce0686ec32a4915e17c8fdc9919 100644 (file)
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode)
 
        devconf2 &= ~CONF2_OTGMODE;
        switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
        case MUSB_HOST:         /* Force VBUS valid, ID = 0 */
                devconf2 |= CONF2_FORCE_HOST;
                break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        case MUSB_PERIPHERAL:   /* Force VBUS valid, ID = 1 */
                devconf2 |= CONF2_FORCE_DEVICE;
                break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
        case MUSB_OTG:          /* Don't override the VBUS/ID comparators */
                devconf2 |= CONF2_NO_OVERRIDE;
                break;
-#endif
        default:
                pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
        }
index a34023d0ca7c665627d96ba4109a8a0e17b4ec6e..3a595e8997245495672d703fc52c7a148ffb0a9f 100644 (file)
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void)
        ret = request_irq(omap_prcm_event_to_irq("io"),
                _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
                omap3_pm_init);
+       enable_irq(omap_prcm_event_to_irq("io"));
 
        if (ret) {
                pr_err("pm: Failed to request pm_io irq\n");
index 9ce765407ad55d5ac9190af77cdb48338c338752..21cb74003a562f27bc2a5a8538093d92503c44d0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include "common.h"
 #include <plat/cpu.h>
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
 
 static int __init omap3xxx_prcm_init(void)
 {
-       if (cpu_is_omap34xx())
-               return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
-       return 0;
+       int ret = 0;
+
+       if (cpu_is_omap34xx()) {
+               ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+               if (!ret)
+                       irq_set_status_flags(omap_prcm_event_to_irq("io"),
+                                            IRQ_NOAUTOEN);
+       }
+
+       return ret;
 }
 subsys_initcall(omap3xxx_prcm_init);
index 292d4aaca068e4c0971f03d1f1aabcd27c3ba1f5..c1b93c752d7013307b982422f0d871472a3d3eb0 100644 (file)
@@ -57,6 +57,7 @@ struct omap_uart_state {
 
        struct list_head node;
        struct omap_hwmod *oh;
+       struct omap_device_pad default_omap_uart_pads[2];
 };
 
 static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+
+#define OMAP_UART_DEFAULT_PAD_NAME_LEN 28
+static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
+               tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
+
+static void  __init
+omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
+                               struct omap_uart_state *uart)
+{
+       uart->default_omap_uart_pads[0].name = rx_pad_name;
+       uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
+                                                       OMAP_DEVICE_PAD_WAKEUP;
+       uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
+                                                       OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[1].name = tx_pad_name;
+       uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
+                                                       OMAP_MUX_MODE0;
+       bdata->pads = uart->default_omap_uart_pads;
+       bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
+}
+
+static void  __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+                                               struct omap_uart_state *uart)
 {
+       struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
+       struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
+       char *rx_fmt, *tx_fmt;
+       int uart_nr = bdata->id + 1;
+
+       if (bdata->id != 2) {
+               rx_fmt = "uart%d_rx.uart%d_rx";
+               tx_fmt = "uart%d_tx.uart%d_tx";
+       } else {
+               rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
+               tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
+       }
+
+       snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
+                       uart_nr, uart_nr);
+       snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
+                       uart_nr, uart_nr);
+
+       if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
+                       omap_mux_get_by_name
+                               (tx_pad_name, &tx_partition, &tx_mux) >= 0) {
+               u16 tx_mode, rx_mode;
+
+               tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
+               rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
+
+               /*
+                * Check if the UART is used in its default tx/rx mode, i.e. mux
+                * mode0; if so, configure the rx pin for wakeup capability.
+                */
+               if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
+                       omap_serial_fill_uart_tx_rx_pads(bdata, uart);
+       }
 }
 #else
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+               struct omap_uart_state *uart)
+{
+}
 #endif
 
 static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info)
                bdata.pads = NULL;
                bdata.pads_cnt = 0;
 
-               if (cpu_is_omap44xx() || cpu_is_omap34xx())
-                       omap_serial_fill_default_pads(&bdata);
+               omap_serial_check_wakeup(&bdata, uart);
 
                if (!info)
                        omap_serial_init_port(&bdata, NULL);
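
The wakeup check in the serial.c hunk above hinges on two small pieces: building the "signal.pad" lookup names with snprintf(), and the OMAP_MODE_UART() test from mux.h, which masks the pad register's mode bits and compares them against mux mode 0. The standalone sketch below shows just those two pieces; the mode mask values (low three bits, mode 0) are an assumption here, and the pad register contents are made up rather than read from hardware.

    #include <stdio.h>

    /* Assumed pad-register layout: mux mode in the low three bits (0..7). */
    #define MUX_MODE0 0x0
    #define MUX_MODE7 0x7

    #define MODE_IS_UART(x) (((x) & MUX_MODE7) == MUX_MODE0)

    #define PAD_NAME_LEN 28

    int main(void)
    {
            char rx_pad_name[PAD_NAME_LEN], tx_pad_name[PAD_NAME_LEN];
            int uart_nr = 1;                /* bdata->id + 1 */
            unsigned int rx_mode = 0x0100;  /* made-up pad register values */
            unsigned int tx_mode = 0x0000;

            /* Same "signal.pad" naming scheme as omap_serial_check_wakeup(). */
            snprintf(rx_pad_name, PAD_NAME_LEN, "uart%d_rx.uart%d_rx",
                     uart_nr, uart_nr);
            snprintf(tx_pad_name, PAD_NAME_LEN, "uart%d_tx.uart%d_tx",
                     uart_nr, uart_nr);

            if (MODE_IS_UART(rx_mode) && MODE_IS_UART(tx_mode))
                    printf("%s / %s are in mode0: enable rx wakeup\n",
                           rx_pad_name, tx_pad_name);
            else
                    printf("%s / %s not muxed as UART: leave pads alone\n",
                           rx_pad_name, tx_pad_name);
            return 0;
    }
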
index 119d5a910f3a4a7ef902b5a2cb4054edc1278d56..43a979075338a76d03894e406952e9d6eb28c1ad 100644 (file)
@@ -32,6 +32,7 @@
 #include "twl-common.h"
 #include "pm.h"
 #include "voltage.h"
+#include "mux.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
                    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
 {
        /* PMIC part*/
+       omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        strncpy(omap4_i2c1_board_info[0].type, pmic_type,
                sizeof(omap4_i2c1_board_info[0].type));
        omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
index b19d1b43c12e59ef678debd195225c52f97e1e5a..c4a576856661014ea3bec9acc70f80e32d62c33b 100644 (file)
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_MUSB_OTG
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#else
        .mode           = MUSB_HOST,
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
-       .mode           = MUSB_PERIPHERAL,
 #endif
        /* .clock is set dynamically */
        .config         = &musb_config,
index db84a46ce7fd6da27ca899d3fcf326cae0a3ee79..805bea6edf1711adf0de68e8f68318bdd4c44b80 100644 (file)
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
                printk(error, 3, status);
                return status;
        }
-       tusb_resources[2].start = irq + IH_GPIO_BASE;
+       tusb_resources[2].start = gpio_to_irq(irq);
 
        /* set up memory timings ... can speed them up later */
        if (!ps_refclk) {
index 96484bcd34ca66bf441cde80c8735cb591c29b40..11a3c1e9801f4bea759d6deb3ec9179b019c3650 100644 (file)
@@ -35,5 +35,5 @@
 #define MAIN_IRQ_MASK          (ORION5X_BRIDGE_VIRT_BASE | 0x204)
 
 #define TIMER_VIRT_BASE                (ORION5X_BRIDGE_VIRT_BASE | 0x300)
-
+#define TIMER_PHYS_BASE                (ORION5X_BRIDGE_PHYS_BASE | 0x300)
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644 (file)
index 0000000..1aa5d0a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-orion5x/include/mach/io.h
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/orion5x.h>
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT         SZ_2M
+static inline void __iomem *__io(unsigned long addr)
+{
+       return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
+}
+
+#define __io(a)                         __io(a)
+#endif
index 2745f5d95b3fe085a30e6c6c6c11c6c6c5ec687b..683e085ce1624088f7ec138c1c4fa14c636a2ddd 100644 (file)
@@ -82,6 +82,7 @@
 #define  UART1_VIRT_BASE               (ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
 
 #define ORION5X_BRIDGE_VIRT_BASE       (ORION5X_REGS_VIRT_BASE | 0x20000)
+#define ORION5X_BRIDGE_PHYS_BASE       (ORION5X_REGS_PHYS_BASE | 0x20000)
 
 #define ORION5X_PCI_VIRT_BASE          (ORION5X_REGS_VIRT_BASE | 0x30000)
 
index a74f3cf54cc56b8ae0c1ecf1caefbd0d2dccc138..b4203277f3cd5b732c746a481ef435be1d3bdbad 100644 (file)
@@ -251,8 +251,6 @@ static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd,
                readsb(io_base, buf, len);
 }
 
-const char *ts_nand_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ts78xx_ts_nand_parts[] = {
        {
                .name           = "mbr",
@@ -277,7 +275,6 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
 static struct platform_nand_data ts78xx_ts_nand_data = {
        .chip   = {
                .nr_chips               = 1,
-               .part_probe_types       = ts_nand_part_probes,
                .partitions             = ts78xx_ts_nand_parts,
                .nr_partitions          = ARRAY_SIZE(ts78xx_ts_nand_parts),
                .chip_delay             = 15,
index 56e8cebeb7d5240dab37761ddc25978d997a6ece..9244493dbcb7dc402ca1f69eb34b120c01487a15 100644 (file)
@@ -679,8 +679,6 @@ static struct mtd_partition balloon3_partition_info[] = {
        },
 };
 
-static const char *balloon3_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data balloon3_nand_pdata = {
        .chip = {
                .nr_chips       = 4,
@@ -688,7 +686,6 @@ struct platform_nand_data balloon3_nand_pdata = {
                .nr_partitions  = ARRAY_SIZE(balloon3_partition_info),
                .partitions     = balloon3_partition_info,
                .chip_delay     = 50,
-               .part_probe_types = balloon3_part_probes,
        },
        .ctrl = {
                .hwcontrol      = 0,
index a3a4a38d49727a818e2228043f4194a8046eb6c6..97f82ad341bfbcbb98123499856b8811106a223e 100644 (file)
@@ -338,8 +338,6 @@ static struct mtd_partition em_x270_partition_info[] = {
        },
 };
 
-static const char *em_x270_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data em_x270_nand_platdata = {
        .chip = {
                .nr_chips = 1,
@@ -347,7 +345,6 @@ struct platform_nand_data em_x270_nand_platdata = {
                .nr_partitions = ARRAY_SIZE(em_x270_partition_info),
                .partitions = em_x270_partition_info,
                .chip_delay = 20,
-               .part_probe_types = em_x270_part_probes,
        },
        .ctrl = {
                .hwcontrol = 0,
index d09da6a746b8a15d212e0e1aabcaeacbf8b480cc..d3de84b0dcbed280a27d12092509f6a9df4e181e 100644 (file)
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO19_SSP2_SCLK,
        GPIO86_SSP2_RXD,
        GPIO87_SSP2_TXD,
-       GPIO88_GPIO,
+       GPIO88_GPIO | MFP_LPM_DRIVE_HIGH,       /* TSC2046_CS */
+
+       /* BQ24022 Regulator */
+       GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_nCHARGE_EN */
+       GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_ISET2 */
 
        /* HX4700 specific input GPIOs */
        GPIO12_GPIO | WAKEUP_ON_EDGE_RISE,      /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO14_GPIO,    /* nWLAN_IRQ */
 
        /* HX4700 specific output GPIOs */
+       GPIO61_GPIO | MFP_LPM_DRIVE_HIGH,       /* W3220_nRESET */
+       GPIO71_GPIO | MFP_LPM_DRIVE_HIGH,       /* ASIC3_nRESET */
+       GPIO81_GPIO | MFP_LPM_DRIVE_HIGH,       /* CPU_GP_nRESET */
+       GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,      /* CPU_HW_nRESET */
        GPIO102_GPIO | MFP_LPM_DRIVE_LOW,       /* SYNAPTICS_POWER_ON */
 
        GPIO10_GPIO,    /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
        { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
        { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
        { GPIO32_HX4700_RS232_ON,         GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+       { GPIO61_HX4700_W3220_nRESET,     GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
        { GPIO71_HX4700_ASIC3_nRESET,     GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+       { GPIO81_HX4700_CPU_GP_nRESET,    GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
        { GPIO82_HX4700_EUART_RESET,      GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+       { GPIO116_HX4700_CPU_HW_nRESET,   GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
 };
 
 static void __init hx4700_init(void)
 {
        int ret;
 
+       PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
        pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
        gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
        ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
index 9507605ed547a5366abf75cb704ee7c23a50ee62..0da35dccfd8932aa4e8ed82209fe07d593deb5bd 100644 (file)
@@ -268,8 +268,6 @@ static struct mtd_partition palmtx_partition_info[] = {
        },
 };
 
-static const char *palmtx_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data palmtx_nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -277,7 +275,6 @@ struct platform_nand_data palmtx_nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(palmtx_partition_info),
                .partitions             = palmtx_partition_info,
                .chip_delay             = 20,
-               .part_probe_types       = palmtx_part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = palmtx_nand_cmd_ctl,
index 414364eb426cf70d58a439b7e99b15e26c91a0c7..cb2883d553b5e53c320588eb740d435e111d0758 100644 (file)
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
 static struct clk s3c2440_clk_ac97 = {
        .name           = "ac97",
        .enable         = s3c2410_clkcon_enable,
-       .ctrlbit        = S3C2440_CLKCON_CAMERA,
+       .ctrlbit        = S3C2440_CLKCON_AC97,
 };
 
 static unsigned long  s3c2440_fclk_n_getrate(struct clk *clk)
index e53b2177319e701cdb44e8f479e2de62282e826d..b7a9f4d469e816bf6ca1f389f31d9280d06ecc46 100644 (file)
 #define IRQ_S32416_WDT         S3C2410_IRQSUB(27)
 #define IRQ_S32416_AC97                S3C2410_IRQSUB(28)
 
+/* second interrupt-register of s3c2416/s3c2450 */
+
+#define S3C2416_IRQ(x)         S3C2410_IRQ((x) + 54 + 29)
+#define IRQ_S3C2416_2D         S3C2416_IRQ(0)
+#define IRQ_S3C2416_IIC1       S3C2416_IRQ(1)
+#define IRQ_S3C2416_RESERVED2  S3C2416_IRQ(2)
+#define IRQ_S3C2416_RESERVED3  S3C2416_IRQ(3)
+#define IRQ_S3C2416_PCM0       S3C2416_IRQ(4)
+#define IRQ_S3C2416_PCM1       S3C2416_IRQ(5)
+#define IRQ_S3C2416_I2S0       S3C2416_IRQ(6)
+#define IRQ_S3C2416_I2S1       S3C2416_IRQ(7)
 
 /* extra irqs for s3c2440 */
 
 #define IRQ_S3C2443_WDT                S3C2410_IRQSUB(27)
 #define IRQ_S3C2443_AC97       S3C2410_IRQSUB(28)
 
-#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#if defined(CONFIG_CPU_S3C2416)
+#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
 #define NR_IRQS (IRQ_S3C2443_AC97+1)
 #else
 #define NR_IRQS (IRQ_S3C2440_AC97+1)
index fd49f35e448ec7090098a4293493df7ee8a32f86..23ec97370f3272ea21ad6f745b859c5105ff04e2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/io.h>
+#include <linux/syscore_ops.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
@@ -192,6 +193,43 @@ static struct irq_chip s3c2416_irq_uart3 = {
        .irq_ack        = s3c2416_irq_uart3_ack,
 };
 
+/* second interrupt register */
+
+static inline void s3c2416_irq_ack_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+
+       __raw_writel(bitval, S3C2416_SRCPND2);
+       __raw_writel(bitval, S3C2416_INTPND2);
+}
+
+static void s3c2416_irq_mask_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+       unsigned long mask;
+
+       mask = __raw_readl(S3C2416_INTMSK2);
+       mask |= bitval;
+       __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+static void s3c2416_irq_unmask_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+       unsigned long mask;
+
+       mask = __raw_readl(S3C2416_INTMSK2);
+       mask &= ~bitval;
+       __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+struct irq_chip s3c2416_irq_second = {
+       .irq_ack        = s3c2416_irq_ack_second,
+       .irq_mask       = s3c2416_irq_mask_second,
+       .irq_unmask     = s3c2416_irq_unmask_second,
+};
+
+
 /* IRQ initialisation code */
 
 static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@ static int __init s3c2416_add_sub(unsigned int base,
        return 0;
 }
 
+static void __init s3c2416_irq_add_second(void)
+{
+       unsigned long pend;
+       unsigned long last;
+       int irqno;
+       int i;
+
+       /* first, clear all interrupts pending... */
+       last = 0;
+       for (i = 0; i < 4; i++) {
+               pend = __raw_readl(S3C2416_INTPND2);
+
+               if (pend == 0 || pend == last)
+                       break;
+
+               __raw_writel(pend, S3C2416_SRCPND2);
+               __raw_writel(pend, S3C2416_INTPND2);
+               printk(KERN_INFO "irq: clearing pending status %08x\n",
+                      (int)pend);
+               last = pend;
+       }
+
+       for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
+               switch (irqno) {
+               case IRQ_S3C2416_RESERVED2:
+               case IRQ_S3C2416_RESERVED3:
+                       /* no IRQ here */
+                       break;
+               default:
+                       irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
+                                                handle_edge_irq);
+                       set_irq_flags(irqno, IRQF_VALID);
+               }
+       }
+}
+
 static int __init s3c2416_irq_add(struct device *dev,
                                  struct subsys_interface *sif)
 {
@@ -232,6 +306,8 @@ static int __init s3c2416_irq_add(struct device *dev,
                        &s3c2416_irq_wdtac97,
                        IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
 
+       s3c2416_irq_add_second();
+
        return 0;
 }
 
@@ -248,3 +324,25 @@ static int __init s3c2416_irq_init(void)
 
 arch_initcall(s3c2416_irq_init);
 
+#ifdef CONFIG_PM
+static struct sleep_save irq_save[] = {
+       SAVE_ITEM(S3C2416_INTMSK2),
+};
+
+int s3c2416_irq_suspend(void)
+{
+       s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+
+       return 0;
+}
+
+void s3c2416_irq_resume(void)
+{
+       s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+}
+
+struct syscore_ops s3c2416_irq_syscore_ops = {
+       .suspend        = s3c2416_irq_suspend,
+       .resume         = s3c2416_irq_resume,
+};
+#endif
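
The ack/mask/unmask handlers added above are plain read-modify-write operations on the S3C2416's second interrupt bank. The sketch below simulates the same bit manipulation with ordinary variables; on the real SoC these are memory-mapped registers accessed with __raw_readl()/__raw_writel(), SRCPND2/INTPND2 are write-one-to-clear (only modelled here), and INTMSK2 uses 1 = masked.

    #include <stdio.h>

    /* Simulated copies of the second-bank registers (illustration only). */
    static unsigned int srcpnd2, intpnd2, intmsk2 = 0xffffffff;

    static void irq_ack_second(unsigned int hwbit)
    {
            unsigned int bitval = 1u << hwbit;

            /* Clear the source-pending bit first, then interrupt-pending. */
            srcpnd2 &= ~bitval;
            intpnd2 &= ~bitval;
    }

    static void irq_mask_second(unsigned int hwbit)
    {
            intmsk2 |= 1u << hwbit;         /* 1 = masked */
    }

    static void irq_unmask_second(unsigned int hwbit)
    {
            intmsk2 &= ~(1u << hwbit);      /* 0 = enabled */
    }

    int main(void)
    {
            srcpnd2 = intpnd2 = 1u << 6;    /* pretend I2S0 (bit 6) fired */

            irq_unmask_second(6);
            irq_ack_second(6);
            irq_mask_second(6);

            printf("SRCPND2=%08x INTPND2=%08x INTMSK2=%08x\n",
                   srcpnd2, intpnd2, intmsk2);
            return 0;
    }
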
index 30a44f806e0154b90bb658287c734b6f9a5126cd..c3100a044fbe9b632d364e8adc1125da464a0b15 100644 (file)
@@ -148,23 +148,25 @@ static struct s3c24xx_hsudc_platdata smdk2416_hsudc_platdata = {
 
 static struct s3c_fb_pd_win smdk2416_fb_win[] = {
        [0] = {
-               /* think this is the same as the smdk6410 */
-               .win_mode       = {
-                       .pixclock       = 41094,
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
                .default_bpp    = 16,
                .max_bpp        = 32,
+               .xres           = 800,
+               .yres           = 480,
        },
 };
 
+static struct fb_videomode smdk2416_lcd_timing = {
+       .pixclock       = 41094,
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
 static void s3c2416_fb_gpio_setup_24bpp(void)
 {
        unsigned int gpio;
@@ -187,6 +189,7 @@ static void s3c2416_fb_gpio_setup_24bpp(void)
 
 static struct s3c_fb_platdata smdk2416_fb_platdata = {
        .win[0]         = &smdk2416_fb_win[0],
+       .vtiming        = &smdk2416_lcd_timing,
        .setup_gpio     = s3c2416_fb_gpio_setup_24bpp,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
index 7743fade50dfa59e1c3d669d5aa55478adb993e1..ed5a95ece9eb2d365b7152cdb4bd5cfe508e672b 100644 (file)
@@ -106,6 +106,7 @@ int __init s3c2416_init(void)
        register_syscore_ops(&s3c2416_pm_syscore_ops);
 #endif
        register_syscore_ops(&s3c24xx_irq_syscore_ops);
+       register_syscore_ops(&s3c2416_irq_syscore_ops);
 
        return device_register(&s3c2416_dev);
 }
index 179460f38db7587c94321ca2c401173cf174da77..acb197ccf3f7e7635ccbba7a3426303701e0fbf8 100644 (file)
@@ -27,12 +27,7 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv,
                              int index)
 {
-       struct timeval before, after;
        unsigned long tmp;
-       int idle_time;
-
-       local_irq_disable();
-       do_gettimeofday(&before);
 
        /* Setup PWRCFG to enter idle mode */
        tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
 
        cpu_do_idle();
 
-       do_gettimeofday(&after);
-       local_irq_enable();
-       idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-                   (after.tv_usec - before.tv_usec);
-
-       dev->last_residency = idle_time;
        return index;
 }
 
-static struct cpuidle_state s3c64xx_cpuidle_set[] = {
-       [0] = {
-               .enter                  = s3c64xx_enter_idle,
-               .exit_latency           = 1,
-               .target_residency       = 1,
-               .flags                  = CPUIDLE_FLAG_TIME_VALID,
-               .name                   = "IDLE",
-               .desc                   = "System active, ARM gated",
-       },
-};
+static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
 
 static struct cpuidle_driver s3c64xx_cpuidle_driver = {
-       .name           = "s3c64xx_cpuidle",
-       .owner          = THIS_MODULE,
-       .state_count    = ARRAY_SIZE(s3c64xx_cpuidle_set),
-};
-
-static struct cpuidle_device s3c64xx_cpuidle_device = {
-       .state_count    = ARRAY_SIZE(s3c64xx_cpuidle_set),
+       .name   = "s3c64xx_cpuidle",
+       .owner  = THIS_MODULE,
+       .en_core_tk_irqen = 1,
+       .states = {
+               {
+                       .enter            = s3c64xx_enter_idle,
+                       .exit_latency     = 1,
+                       .target_residency = 1,
+                       .flags            = CPUIDLE_FLAG_TIME_VALID,
+                       .name             = "IDLE",
+                       .desc             = "System active, ARM gated",
+               },
+       },
+       .state_count = 1,
 };
 
 static int __init s3c64xx_init_cpuidle(void)
 {
        int ret;
 
-       memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
-              sizeof(s3c64xx_cpuidle_set));
        cpuidle_register_driver(&s3c64xx_cpuidle_driver);
 
        ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
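
The hunk above drops the driver's own gettimeofday bookkeeping and sets en_core_tk_irqen so the cpuidle core measures residency and handles IRQ enable/disable around the now-inline state table. Below is a hedged userspace sketch of that idea (timing done once in a generic wrapper rather than in each enter callback); idle_state, enter_with_timekeeping and demo_enter_idle are invented names, and nanosleep() merely stands in for cpu_do_idle().

#include <stdio.h>
#include <time.h>

struct idle_state {
        const char *name;
        int (*enter)(int index);        /* returns the state actually entered */
};

static int demo_enter_idle(int index)
{
        /* stand-in for cpu_do_idle(): briefly sleep instead of halting */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

        nanosleep(&ts, NULL);
        return index;
}

static struct idle_state demo_states[] = {
        { .name = "IDLE", .enter = demo_enter_idle },
};

/* Generic wrapper: residency is measured once, by the "core". */
static long enter_with_timekeeping(const struct idle_state *st, int index)
{
        struct timespec before, after;

        clock_gettime(CLOCK_MONOTONIC, &before);
        st->enter(index);
        clock_gettime(CLOCK_MONOTONIC, &after);

        return (after.tv_sec - before.tv_sec) * 1000000L +
               (after.tv_nsec - before.tv_nsec) / 1000L;
}

int main(void)
{
        long us = enter_with_timekeeping(&demo_states[0], 0);

        printf("state %s: last residency ~%ld us\n", demo_states[0].name, us);
        return 0;
}
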
index 314df0518afd51acf8c65525d219a0dcb3047e34..ffa29ddfdfced9084d16b49c509b79fa342208b5 100644 (file)
@@ -134,24 +134,27 @@ static struct platform_device anw6410_lcd_powerdev = {
 };
 
 static struct s3c_fb_pd_win anw6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode anw6410_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &anw6410_lcd_timing,
        .win[0]         = &anw6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
index 0ace108c3e3d710264f4c88dfa5676702723fbfc..7a27f5603c7405fca49409c33ee9af79413de701 100644 (file)
@@ -182,6 +182,11 @@ static const struct i2c_board_info wm1277_devs[] = {
        },
 };
 
+static const struct i2c_board_info wm6230_i2c_devs[] = {
+       { I2C_BOARD_INFO("wm9081", 0x6c),
+         .platform_data = &wm9081_pdata, },
+};
+
 static __devinitdata const struct {
        u8 id;
        const char *name;
@@ -195,7 +200,9 @@ static __devinitdata const struct {
        { .id = 0x03, .name = "1252-EV1 Glenlivet" },
        { .id = 0x11, .name = "6249-EV2 Glenfarclas", },
        { .id = 0x14, .name = "6271-EV1 Lochnagar" },
-       { .id = 0x15, .name = "XXXX-EV1 Bells" },
+       { .id = 0x15, .name = "6320-EV1 Bells",
+         .i2c_devs = wm6230_i2c_devs,
+         .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
        { .id = 0x21, .name = "1275-EV1 Mortlach" },
        { .id = 0x25, .name = "1274-EV1 Glencadam" },
        { .id = 0x31, .name = "1253-EV1 Tomatin",
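
The change above gives the 6320-EV1 Bells entry an optional per-module I2C device list (a wm9081 at 0x6c). A compact stand-alone sketch of that table shape follows, assuming made-up module_entry/i2c_dev_sketch types and a printf in place of the real i2c_register_board_info() call.

#include <stdio.h>
#include <stddef.h>

struct i2c_dev_sketch {
        const char *type;
        unsigned short addr;
};

struct module_entry {
        unsigned char id;
        const char *name;
        const struct i2c_dev_sketch *i2c_devs;  /* optional extra devices */
        size_t num_i2c_devs;
};

static const struct i2c_dev_sketch wm6230_devs[] = {
        { .type = "wm9081", .addr = 0x6c },
};

static const struct module_entry modules[] = {
        { .id = 0x14, .name = "6271-EV1 Lochnagar" },
        { .id = 0x15, .name = "6320-EV1 Bells",
          .i2c_devs = wm6230_devs,
          .num_i2c_devs = sizeof(wm6230_devs) / sizeof(wm6230_devs[0]) },
};

int main(void)
{
        for (size_t i = 0; i < sizeof(modules) / sizeof(modules[0]); i++) {
                printf("module 0x%02x (%s): %zu extra i2c device(s)\n",
                       modules[i].id, modules[i].name, modules[i].num_i2c_devs);
                for (size_t j = 0; j < modules[i].num_i2c_devs; j++)
                        printf("  would register %s at 0x%02x\n",
                               modules[i].i2c_devs[j].type,
                               modules[i].i2c_devs[j].addr);
        }
        return 0;
}
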
index eda5e027b109aa6a9af206c782cbfba6cd58bb9f..d0c352d861f8f341fd93ac2da561b7c2bab35805 100644 (file)
@@ -151,26 +151,29 @@ static struct platform_device crag6410_lcd_powerdev = {
 
 /* 640x480 URT */
 static struct s3c_fb_pd_win crag6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 150,
-               .right_margin   = 80,
-               .upper_margin   = 40,
-               .lower_margin   = 5,
-               .hsync_len      = 40,
-               .vsync_len      = 5,
-               .xres           = 640,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 640,
+       .yres           = 480,
        .virtual_y      = 480 * 2,
        .virtual_x      = 640,
 };
 
+static struct fb_videomode crag6410_lcd_timing = {
+       .left_margin    = 150,
+       .right_margin   = 80,
+       .upper_margin   = 40,
+       .lower_margin   = 5,
+       .hsync_len      = 40,
+       .vsync_len      = 5,
+       .xres           = 640,
+       .yres           = 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &crag6410_lcd_timing,
        .win[0]         = &crag6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -671,6 +674,7 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
          .irq = S3C_EINT(0),
          .platform_data = &glenfarclas_pmic_pdata },
 
+       { I2C_BOARD_INFO("wlf-gf-module", 0x22) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x24) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x25) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x26) },
index 1bf6b9da20fc487aaf78086ad715bb50f5351023..689088162f77e4eaab4868cb519889e976ab4401 100644 (file)
@@ -129,23 +129,27 @@ static struct platform_device hmt_backlight_device = {
 };
 
 static struct s3c_fb_pd_win hmt_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode hmt_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata hmt_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &hmt_lcd_timing,
        .win[0]         = &hmt_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
index f8ea61ea3b33f27b137503d24bbccef1b205ec33..5539a255a70446a8e89f3c2aa6e3735fadef08c2 100644 (file)
@@ -140,41 +140,59 @@ static struct s3c2410_platform_nand mini6410_nand_info = {
        .sets           = mini6410_nand_sets,
 };
 
-static struct s3c_fb_pd_win mini6410_fb_win[] = {
+static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct fb_videomode mini6410_lcd_type0_timing = {
+       /* 4.3" 480x272 */
+       .left_margin    = 3,
+       .right_margin   = 2,
+       .upper_margin   = 1,
+       .lower_margin   = 1,
+       .hsync_len      = 40,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct s3c_fb_pd_win mini6410_lcd_type1_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode mini6410_lcd_type1_timing = {
+       /* 7.0" 800x480 */
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct s3c_fb_platdata mini6410_lcd_pdata[] __initdata = {
        {
-               .win_mode       = {     /* 4.3" 480x272 */
-                       .left_margin    = 3,
-                       .right_margin   = 2,
-                       .upper_margin   = 1,
-                       .lower_margin   = 1,
-                       .hsync_len      = 40,
-                       .vsync_len      = 1,
-                       .xres           = 480,
-                       .yres           = 272,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &mini6410_lcd_type0_timing,
+               .win[0]         = &mini6410_lcd_type0_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        }, {
-               .win_mode       = {     /* 7.0" 800x480 */
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &mini6410_lcd_type1_timing,
+               .win[0]         = &mini6410_lcd_type1_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        },
-};
-
-static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = {
-       .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
-       .win[0]         = &mini6410_fb_win[0],
-       .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       { },
 };
 
 static void mini6410_lcd_power_set(struct plat_lcd_data *pd,
@@ -272,7 +290,7 @@ static void mini6410_parse_features(
                                        "screen type already set\n", f);
                        } else {
                                int li = f - '0';
-                               if (li >= ARRAY_SIZE(mini6410_fb_win))
+                               if (li >= ARRAY_SIZE(mini6410_lcd_pdata))
                                        printk(KERN_INFO "MINI6410: '%c' out "
                                                "of range LCD mode\n", f);
                                else {
@@ -296,14 +314,12 @@ static void __init mini6410_machine_init(void)
        /* Parse the feature string */
        mini6410_parse_features(&features, mini6410_features_str);
 
-       mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index];
-
        printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n",
-               mini6410_lcd_pdata.win[0]->win_mode.xres,
-               mini6410_lcd_pdata.win[0]->win_mode.yres);
+               mini6410_lcd_pdata[features.lcd_index].win[0]->xres,
+               mini6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
        s3c_nand_set_platdata(&mini6410_nand_info);
-       s3c_fb_set_platdata(&mini6410_lcd_pdata);
+       s3c_fb_set_platdata(&mini6410_lcd_pdata[features.lcd_index]);
        s3c24xx_ts_set_platdata(NULL);
 
        /* configure nCS1 width to 16 bits */
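
With mini6410_lcd_pdata now an array of complete platform-data entries, picking the display reduces to an index plus the ARRAY_SIZE bounds check seen above. A small self-contained sketch of that selection flow follows, with an invented lcd_cfg type and a hard-coded feature character instead of parsing mini6410_features_str.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct lcd_cfg {
        const char *desc;
        unsigned xres, yres;
};

static const struct lcd_cfg lcd_pdata[] = {
        { .desc = "4.3\" panel", .xres = 480, .yres = 272 },
        { .desc = "7.0\" panel", .xres = 800, .yres = 480 },
};

int main(void)
{
        const char *features = "1";     /* e.g. taken from a feature string */
        unsigned li = features[0] - '0';

        if (li >= ARRAY_SIZE(lcd_pdata)) {
                printf("'%c' out of range LCD mode\n", features[0]);
                return 1;
        }

        printf("selected LCD display is %ux%u (%s)\n",
               lcd_pdata[li].xres, lcd_pdata[li].yres, lcd_pdata[li].desc);
        return 0;
}
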
index b92d8e17d502bebd35b876a2ce2926a4a3dde0f9..326b21604bc332a940a7056537b86aaf6d950872 100644 (file)
@@ -106,41 +106,57 @@ static struct platform_device real6410_device_eth = {
        },
 };
 
-static struct s3c_fb_pd_win real6410_fb_win[] = {
+static struct s3c_fb_pd_win real6410_lcd_type0_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct fb_videomode real6410_lcd_type0_timing = {
+       /* 4.3" 480x272 */
+       .left_margin    = 3,
+       .right_margin   = 2,
+       .upper_margin   = 1,
+       .lower_margin   = 1,
+       .hsync_len      = 40,
+       .vsync_len      = 1,
+};
+
+static struct s3c_fb_pd_win real6410_lcd_type1_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode real6410_lcd_type1_timing = {
+       /* 7.0" 800x480 */
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct s3c_fb_platdata real6410_lcd_pdata[] __initdata = {
        {
-               .win_mode       = {     /* 4.3" 480x272 */
-                       .left_margin    = 3,
-                       .right_margin   = 2,
-                       .upper_margin   = 1,
-                       .lower_margin   = 1,
-                       .hsync_len      = 40,
-                       .vsync_len      = 1,
-                       .xres           = 480,
-                       .yres           = 272,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &real6410_lcd_type0_timing,
+               .win[0]         = &real6410_lcd_type0_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        }, {
-               .win_mode       = {     /* 7.0" 800x480 */
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &real6410_lcd_type1_timing,
+               .win[0]         = &real6410_lcd_type1_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        },
-};
-
-static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
-       .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
-       .win[0]         = &real6410_fb_win[0],
-       .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       { },
 };
 
 static struct mtd_partition real6410_nand_part[] = {
@@ -253,7 +269,7 @@ static void real6410_parse_features(
                                        "screen type already set\n", f);
                        } else {
                                int li = f - '0';
-                               if (li >= ARRAY_SIZE(real6410_fb_win))
+                               if (li >= ARRAY_SIZE(real6410_lcd_pdata))
                                        printk(KERN_INFO "REAL6410: '%c' out "
                                                "of range LCD mode\n", f);
                                else {
@@ -277,13 +293,11 @@ static void __init real6410_machine_init(void)
        /* Parse the feature string */
        real6410_parse_features(&features, real6410_features_str);
 
-       real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
-
        printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
-               real6410_lcd_pdata.win[0]->win_mode.xres,
-               real6410_lcd_pdata.win[0]->win_mode.yres);
+               real6410_lcd_pdata[features.lcd_index].win[0]->xres,
+               real6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
-       s3c_fb_set_platdata(&real6410_lcd_pdata);
+       s3c_fb_set_platdata(&real6410_lcd_pdata[features.lcd_index]);
        s3c_nand_set_platdata(&real6410_nand_info);
        s3c24xx_ts_set_platdata(NULL);
 
index c5021d0335c6a3c57036b86e010e1a926b96e6b1..d6266d8b43c91c049d2961e24dbdfd2b9ba7feac 100644 (file)
@@ -108,23 +108,27 @@ static struct platform_device smartq5_buttons_device  = {
 };
 
 static struct s3c_fb_pd_win smartq5_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 216,
-               .right_margin   = 40,
-               .upper_margin   = 35,
-               .lower_margin   = 10,
-               .hsync_len      = 1,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smartq5_lcd_timing = {
+       .left_margin    = 216,
+       .right_margin   = 40,
+       .upper_margin   = 35,
+       .lower_margin   = 10,
+       .hsync_len      = 1,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smartq5_lcd_timing,
        .win[0]         = &smartq5_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
index aa9072a4cbef4b72d4652887a819fc7a882bfeb8..0957d2a980e13f239e6deec8965e8c9939473e65 100644 (file)
@@ -124,23 +124,27 @@ static struct platform_device smartq7_buttons_device  = {
 };
 
 static struct s3c_fb_pd_win smartq7_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 3,
-               .right_margin   = 5,
-               .upper_margin   = 1,
-               .lower_margin   = 20,
-               .hsync_len      = 10,
-               .vsync_len      = 3,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smartq7_lcd_timing = {
+       .left_margin    = 3,
+       .right_margin   = 5,
+       .upper_margin   = 1,
+       .lower_margin   = 20,
+       .hsync_len      = 10,
+       .vsync_len      = 3,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smartq7_lcd_timing,
        .win[0]         = &smartq7_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
index d44319b09412cf572697f2f271543ce6260f6124..df3103d450e22c93f55976634ea003a03c6881d9 100644 (file)
@@ -146,26 +146,29 @@ static struct platform_device smdk6410_lcd_powerdev = {
 };
 
 static struct s3c_fb_pd_win smdk6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
        .virtual_y      = 480 * 2,
        .virtual_x      = 800,
 };
 
+static struct fb_videomode smdk6410_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smdk6410_lcd_timing,
        .win[0]         = &smdk6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
index a40e325d62c88d51adee0e6f0449535508042751..92fefad505cc3972207ea7d3ec70f156148e30a8 100644 (file)
@@ -103,22 +103,26 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6440_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdk6440_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = {
        .win[0]         = &smdk6440_fb_win0,
+       .vtiming        = &smdk6440_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5p64x0_fb_gpio_setup_24bpp,
index efb69e2f2afe7a460b4a820f071b499eb7d33d32..e2335ecf6eae793d04dbbf1ef0f37bd8832d0940 100644 (file)
@@ -121,22 +121,26 @@ static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6450_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdk6450_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = {
        .win[0]         = &smdk6450_fb_win0,
+       .vtiming        = &smdk6450_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5p64x0_fb_gpio_setup_24bpp,
index 674d22992f3c10a0e6b160b78a099a7cde98e5fc..0c3ae38d27ca0e8b78d03a58833107a2329d9914 100644 (file)
@@ -136,24 +136,27 @@ static struct platform_device smdkc100_lcd_powerdev = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdkc100_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkc100_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
        .win[0]         = &smdkc100_fb_win0,
+       .vtiming        = &smdkc100_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5pc100_fb_gpio_setup_24bpp,
index 48d018f2332bc020239ca49f7d42986dd30134f8..af528f9e97f976ea0bb9efe654ef46a843280194 100644 (file)
@@ -96,38 +96,34 @@ static struct s3c2410_uartcfg aquila_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win aquila_fb_win0 = {
-       .win_mode = {
-               .left_margin = 16,
-               .right_margin = 16,
-               .upper_margin = 3,
-               .lower_margin = 28,
-               .hsync_len = 2,
-               .vsync_len = 2,
-               .xres = 480,
-               .yres = 800,
-       },
        .max_bpp = 32,
        .default_bpp = 16,
+       .xres = 480,
+       .yres = 800,
 };
 
 static struct s3c_fb_pd_win aquila_fb_win1 = {
-       .win_mode = {
-               .left_margin = 16,
-               .right_margin = 16,
-               .upper_margin = 3,
-               .lower_margin = 28,
-               .hsync_len = 2,
-               .vsync_len = 2,
-               .xres = 480,
-               .yres = 800,
-       },
        .max_bpp = 32,
        .default_bpp = 16,
+       .xres = 480,
+       .yres = 800,
+};
+
+static struct fb_videomode aquila_lcd_timing = {
+       .left_margin = 16,
+       .right_margin = 16,
+       .upper_margin = 3,
+       .lower_margin = 28,
+       .hsync_len = 2,
+       .vsync_len = 2,
+       .xres = 480,
+       .yres = 800,
 };
 
 static struct s3c_fb_platdata aquila_lcd_pdata __initdata = {
        .win[0]         = &aquila_fb_win0,
        .win[1]         = &aquila_fb_win1,
+       .vtiming        = &aquila_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
                          VIDCON1_INV_VCLK | VIDCON1_INV_VDEN,
index f20a97c8e4117d02462c7033d6b098cfe985a13b..bf5087c2b7fe6078e40837374fe1933a4c143cf3 100644 (file)
@@ -107,25 +107,29 @@ static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win goni_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 16,
-               .right_margin   = 16,
-               .upper_margin   = 2,
-               .lower_margin   = 28,
-               .hsync_len      = 2,
-               .vsync_len      = 1,
-               .xres           = 480,
-               .yres           = 800,
-               .refresh        = 55,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 800,
        .virtual_x      = 480,
        .virtual_y      = 2 * 800,
 };
 
+static struct fb_videomode goni_lcd_timing = {
+       .left_margin    = 16,
+       .right_margin   = 16,
+       .upper_margin   = 2,
+       .lower_margin   = 28,
+       .hsync_len      = 2,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 800,
+       .refresh        = 55,
+};
+
 static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
        .win[0]         = &goni_fb_win0,
+       .vtiming        = &goni_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
index fa1b61209fd93e11d9283f7513739e9184adeb60..0d7ddec88eb74ade7505bca367a855e714752b2b 100644 (file)
@@ -178,22 +178,26 @@ static struct platform_device smdkv210_lcd_lte480wv = {
 };
 
 static struct s3c_fb_pd_win smdkv210_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 13,
-               .right_margin   = 8,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkv210_lcd_timing = {
+       .left_margin    = 13,
+       .right_margin   = 8,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
        .win[0]         = &smdkv210_fb_win0,
+       .vtiming        = &smdkv210_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5pv210_fb_gpio_setup_24bpp,
index f31383c32f9cdfd121e65b1d4a026d7fbd3a2f01..df33909205e2ce6155553a8485512bdd4817422b 100644 (file)
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
        help
          This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+       bool "STI timer driver"
+       default y
+       help
+         This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
index 9e37026ef9ddb6b5aa2f12b3adcbacdc0b9fa838..9bd135531d76118feecbc2a02647344ce21b25c5 100644 (file)
@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
        .init_irq       = r8a7740_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = eva_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = eva_boards_compat_dt,
 MACHINE_END
index 7bc5e7d39f9bd12b20e175a66a32a21d27dc3623..6a33cf393428f730d9cc2b44633af997e4c7d3df 100644 (file)
@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
        .init_irq       = emev2_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm9d_add_standard_devices,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9d_boards_compat_dt,
 MACHINE_END
index d8e33b682832e8279e06c1e1d27e6825ad638397..c0ae815e7beb18a9be40be4bb892bfb725af7b78 100644 (file)
@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9g_boards_compat_dt,
 MACHINE_END
index b577f7c44678ade9a79753742e6606b3eb120600..150122a446304071d23595c291b906b7b537707c 100644 (file)
@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
        gpio_request(GPIO_FN_SDHID0_1, NULL);
        gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+       /* SDHI0 PORT172 card-detect IRQ26 */
+       gpio_request(GPIO_FN_IRQ26_172, NULL);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
        /* enable SDHI1 */
        gpio_request(GPIO_FN_SDHICMD1, NULL);
index 472d1f5361e5390a58ecde58966c075d0532bf6d..3946c4ba2aa813f0cea841b5c66b622c5f0d3950 100644 (file)
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
 
 enum { MSTP001,
        MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
-       MSTP219,
+       MSTP219, MSTP218,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-       MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+       MSTP331, MSTP329, MSTP325, MSTP323,
        MSTP314, MSTP313, MSTP312, MSTP311,
        MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
        [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
        [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+       [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
        [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
        [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
        [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
        [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
        [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-       [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
        [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
        CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
        CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
        CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
index 550b23df4fd44d9b3cb4f7db19606153482b8c3e..f04fad4ec4fb5406edc4966fca6f3850b8861c56 100644 (file)
@@ -35,6 +35,9 @@
 #define INT2SMSKCR3 0xfe7822ac
 #define INT2SMSKCR4 0xfe7822b0
 
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
 static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 {
        return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
        gic_init(0, 29, gic_dist_base, gic_cpu_base);
        gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+       /* route all interrupts to ARM */
+       __raw_writel(0xffffffff, INT2NTSR0);
+       __raw_writel(0x3fffffff, INT2NTSR1);
+
        /* unmask all known interrupts in INTCS2 */
        __raw_writel(0xfffffff0, INT2SMSKCR0);
        __raw_writel(0xfff7ffff, INT2SMSKCR1);
index bacdd667e3b192f908ef352f87d321c371a7d9a7..fde0d23121dc6e14acd614504d054d0c83f778c6 100644 (file)
 #include <mach/common.h>
 #include <mach/emev2.h>
 
+#ifdef CONFIG_ARCH_SH73A0
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
                        of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
 #define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
 #define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
 
 static unsigned int __init shmobile_smp_get_core_count(void)
 {
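
The guards added above make is_sh73a0() and is_emev2() collapse to a constant 0 when the matching SoC support is not configured, so the dependent SMP code still type-checks but compiles away. A tiny sketch of that compile-time predicate pattern follows; CONFIG_ARCH_EMEV2 comes from the diff, while probe_emev2_hardware() and the messages are invented.

#include <stdio.h>

/* Uncomment to emulate enabling the SoC in Kconfig: */
/* #define CONFIG_ARCH_EMEV2 1 */

#ifdef CONFIG_ARCH_EMEV2
static int probe_emev2_hardware(void)
{
        return 1;       /* pretend the compatible machine was found */
}
#define is_emev2()      (probe_emev2_hardware())
#else
#define is_emev2()      (0)     /* dependent branch is constant-folded away */
#endif

int main(void)
{
        if (is_emev2())
                printf("EMEV2 path: query the SCU for the core count\n");
        else
                printf("non-EMEV2 path: assume a single core\n");
        return 0;
}
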
index 6a4bd582c028e8b890e926c73d9b292c941b1689..fafce9ce8218c440028b6083a2254c4452b1a223 100644 (file)
@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
        },
 };
 
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
 
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
        {
index ea1564609bd4229869be3cff80ea80111202f907..9e3ae6bfe50dc442c67a79387c2761e9ddf801db 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 383ab04dc6c97bc295ff34b3ecac822e1e7c7389..d50bdb6059256bf69bcce0b8c164664acba96203 100644 (file)
@@ -4,7 +4,7 @@
  * DMA information for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6d8c45b9f2984d21c2eb61ad822fbaf563fbe5c4..dac57fd0cdfdcc865e62f1f0d61b1d98919099d4 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx machine family generic header file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index cd6f4f86a56b96d702feaf2fc33f002156671491..85f176311f63e68913bfbecb4d5b612f9205096c 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f542a24aa5f2b01675b8ec2afaabc8472e0a2340..271a62b4cd314e9d0773db8dacc175ce3c568f3d 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 30c57ef726866135947239f80078c40461bd390a..65f27def239b804348de6690ca5b2f4718229ed3 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx Machine family specific definition
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 31af3e8d976e10162f0a3bc25eb0030cbf68bbb6..3a58b8284a6aa8345e5b094ab581284586154a95 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c7840896ae6e65c25f4a471a5ab5ac8efeba84f9..70fe72f05deacb66cf16ea4c8fa0f887f3f28b84 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fefd15b2f380609b6b526a46113cd3c64ee4f718..732d29bc73307231ccc9009ba1b80195a7153588 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ee38cbc56869cbce720d785675293b9827e247a2..81e4ed76ad0652a278255ba2b4dda307628307d3 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1340 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 50b349ae863d9edeedc7addcce6772646f468210..cf936b106e27b23d069f1a726f0cd474e7cd60d4 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr13XX machines common source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 590519f10d6ebc415c254518b2202a66661e0548..0a6381fad5d9a40a0b24250c724d2e315bd7d585 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4a95b9453c2a5ce67bb3f636c016d9c6c4e246bc..ce19113ca791ae1478cc0b846bdc7bf511cf7533 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family generic header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 451b2081bfc966f841966ff27cf08c2637fcabc7..2ac74c6db7f1508fc71106b17d186c0feaa50971 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51bd62a0254c814acca905b44696e834700c9cb2..803de76f5f36fb5535c632c385869d70c86e61b8 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 18e2ac576f25cc209880e4f239f40b92fcadee89..6309bf68d6f80d8c00aad241b3bdd6bc97ccbcf5 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51eb953148a998733f656670cb24705b03f0fbc9..8cca95193d4d6b8058f23a26fdabb0cc08338ff5 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3xx Machine family specific definition
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a38cc9de876f73af957e13f9bc2738a3fe016b4d..9f5d08bd0c4441415d69cd82e788731b5f7f5214 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 53ba8bbc0dfa9e63fa7142fc2fadb1c647d8c45e..b909b011f7c84c34d52701a70adc2201f246f97a 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f74a05bdb829d5edb37c76bbb6395d39d661a431..0f882ecb7d810f4a98700fa6f46c913eec0860c9 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr300 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 84dfb09007470062cc6f14633c1d455e98562ca0..bbcf4571d361caefd90c2f685365a5a394fb56c5 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr310 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a88fa841d29d021068d644a16d7c60eb7d7df93f..88d483bcd66a37665dc130bd5f0da016192a950a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr320 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f22419ed74a82cf394c69d0e5d51d2e53fad447d..66db5f13af844360b1b5845d1b06c13a6d51445a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machines common source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
 
 static void __init spear3xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear3xx_clk_init();
index 3a789dbb69f74c0489a264e9dd20560d97a0be32..d42cefc0356dcb0086f83d568eacc026a8365547 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 179e45774b3a9722182b02140ff52566e76bdd53..c34acc201d3463fc39fcff6d410a34f9b3e0d42b 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 2e2e3596583e9072318dbf895644c5aa67a90e38..9af67d003c62ce6f167307020fcd2f52fa181e8a 100644 (file)
@@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
 
 static void __init spear6xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear6xx_clk_init();
index 4d6a2ee99c3b37df6a57e186cc0530da08e6ec98..5beb7ebe2948ff1e706159bf958e9118b3906f7e 100644 (file)
@@ -33,7 +33,7 @@
 
 static bool is_enabled;
 
-static void tegra_cpu_reset_handler_enable(void)
+static void __init tegra_cpu_reset_handler_enable(void)
 {
        void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
        void __iomem *evp_cpu_reset =
index b29a788f498cbcd93e96f7c21329785bab4dd094..1f47d962e3a12f6212a7e9450075bd94e3a49193 100644 (file)
@@ -96,7 +96,7 @@ static void __init __mop500_uib_init(struct uib *uib, const char *why)
 /*
  * Detect the UIB attached based on the presence or absence of i2c devices.
  */
-static int __init mop500_uib_init(void)
+int __init mop500_uib_init(void)
 {
        struct uib *uib = mop500_uib;
        struct i2c_adapter *i2c0;
@@ -131,5 +131,3 @@ static int __init mop500_uib_init(void)
 
        return 0;
 }
-
-module_init(mop500_uib_init);
index fba8adea421e399272336b9ff8d7b6bfb8b645e9..4fd93f5c49ec359f6889f054fe33cd076c7bf822 100644 (file)
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void)
        udelay(1);
 }
 
-/* This needs to be referenced by callbacks */
-struct pinctrl *u0_p;
-struct pinctrl_state *u0_def;
-struct pinctrl_state *u0_sleep;
-
-static void ux500_uart0_init(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_def))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_def);
-       if (ret)
-               pr_err("could not set UART0 defstate\n");
-}
-
-static void ux500_uart0_exit(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_sleep);
-       if (ret)
-               pr_err("could not set UART0 idlestate\n");
-}
-
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
        .dma_filter = stedma40_filter,
        .dma_rx_param = &uart0_dma_cfg_rx,
        .dma_tx_param = &uart0_dma_cfg_tx,
 #endif
-       .init = ux500_uart0_init,
-       .exit = ux500_uart0_exit,
        .reset = ux500_uart0_reset,
 };
 
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = {
 
 static void __init mop500_uart_init(struct device *parent)
 {
-       struct amba_device *uart0_device;
-
-       uart0_device = db8500_add_uart0(parent, &uart0_plat);
-       if (uart0_device) {
-               u0_p = pinctrl_get(&uart0_device->dev);
-               if (IS_ERR(u0_p))
-                       dev_err(&uart0_device->dev,
-                               "could not get UART0 pinctrl\n");
-               else {
-                       u0_def = pinctrl_lookup_state(u0_p,
-                                                     PINCTRL_STATE_DEFAULT);
-                       if (IS_ERR(u0_def)) {
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 defstate\n");
-                       }
-                       u0_sleep = pinctrl_lookup_state(u0_p,
-                                                       PINCTRL_STATE_SLEEP);
-                       if (IS_ERR(u0_sleep))
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 idlestate\n");
-               }
-       }
+       db8500_add_uart0(parent, &uart0_plat);
        db8500_add_uart1(parent, &uart1_plat);
        db8500_add_uart2(parent, &uart2_plat);
 }
@@ -673,6 +621,7 @@ static void __init u8500_cryp1_hash1_init(struct device *parent)
 static struct platform_device *snowball_platform_devs[] __initdata = {
        &snowball_led_dev,
        &snowball_key_dev,
+       &snowball_sbnet_dev,
        &ab8500_device,
 };
 
@@ -710,6 +659,8 @@ static void __init mop500_init_machine(void)
 
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
+
+       mop500_uib_init();
 }
 
 static void __init snowball_init_machine(void)
@@ -774,6 +725,8 @@ static void __init hrefv60_init_machine(void)
 
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
+
+       mop500_uib_init();
 }
 
 MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -811,6 +764,11 @@ MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+       &snowball_led_dev,
+       &snowball_key_dev,
+};
+
 struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        /* Requires DMA and call-back bindings. */
        OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -828,12 +786,18 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+       /* Requires device name bindings. */
+       OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
        {},
 };
 
 static const struct of_device_id u8500_local_bus_nodes[] = {
        /* only create devices below soc node */
        { .compatible = "stericsson,db8500", },
+       { .compatible = "stericsson,db8500-prcmu", },
+       { .compatible = "stericsson,db8500-prcmu-regulator", },
+       { .compatible = "stericsson,ab8500", },
+       { .compatible = "stericsson,ab8500-regulator", },
        { .compatible = "simple-bus"},
        { },
 };
@@ -852,7 +816,7 @@ static void __init u8500_init_machine(void)
        else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
                hrefv60_pinmaps_init();
 
-       parent = u8500_init_devices();
+       parent = u8500_of_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
@@ -869,15 +833,23 @@ static void __init u8500_init_machine(void)
                                ARRAY_SIZE(mop500_platform_devs));
 
                mop500_sdi_init(parent);
-
                i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
                i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
                i2c_register_board_info(2, mop500_i2c2_devices,
                                        ARRAY_SIZE(mop500_i2c2_devices));
 
+               mop500_uib_init();
+
        } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
-               platform_add_devices(snowball_platform_devs,
-                               ARRAY_SIZE(snowball_platform_devs));
+               /*
+                * Devices to be DT:ed:
+                *   snowball_led_dev   = todo
+                *   snowball_key_dev   = todo
+                *   snowball_sbnet_dev = done
+                *   ab8500_device      = done
+                */
+               platform_add_devices(snowball_of_platform_devs,
+                               ARRAY_SIZE(snowball_of_platform_devs));
 
                snowball_sdi_init(parent);
        } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
@@ -898,6 +870,8 @@ static void __init u8500_init_machine(void)
                i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
                i2c_register_board_info(2, mop500_i2c2_devices,
                                        ARRAY_SIZE(mop500_i2c2_devices));
+
+               mop500_uib_init();
        }
        mop500_i2c_init(parent);
 
index bc44c07c71a99a6fcf4f66ba7b3193f1ac0dc911..2f87b25a908a31c74264ac0b7a29eb34f85f2c58 100644 (file)
@@ -89,7 +89,11 @@ void __init mop500_pinmaps_init(void);
 void __init snowball_pinmaps_init(void);
 void __init hrefv60_pinmaps_init(void);
 
+int __init mop500_uib_init(void);
 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
                unsigned n);
 
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void);
+
 #endif
index 16169c4bf6ca5226b748bee3d50d6f6b76f8138e..33275eb4c6890ab7745bbf0d2b2601ecca5e413f 100644 (file)
@@ -140,7 +140,6 @@ static struct platform_device *platform_devs[] __initdata = {
 static struct platform_device *of_platform_devs[] __initdata = {
        &u8500_dma40_device,
        &db8500_pmu_device,
-       &db8500_prcmu_device,
 };
 
 static resource_size_t __initdata db8500_gpio_base[] = {
@@ -219,6 +218,28 @@ struct device * __init u8500_init_devices(void)
        db8500_add_gpios(parent);
        db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
 
+       platform_device_register_data(parent,
+               "cpufreq-u8500", -1, NULL, 0);
+
+       for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
+               platform_devs[i]->dev.parent = parent;
+
+       platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
+
+       return parent;
+}
+
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void)
+{
+       struct device *parent;
+       int i;
+
+       parent = db8500_soc_device_init();
+
+       db8500_add_rtc(parent);
+       db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
+
        platform_device_register_data(parent,
                "cpufreq-u8500", -1, NULL, 0);
 
@@ -229,7 +250,7 @@ struct device * __init u8500_init_devices(void)
         * Devices to be DT:ed:
         *   u8500_dma40_device  = todo
         *   db8500_pmu_device   = todo
-        *   db8500_prcmu_device = todo
+        *   db8500_prcmu_device = done
         */
        platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs));
 
index 741e71feca784134e179a11d0d4389fc80b70a32..66e7f00884ab4b4443d51f56c211ae7ac6385f49 100644 (file)
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
 
        /* TODO: Once MTU has been DT:ed place code above into else. */
        if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
                np = of_find_matching_node(NULL, prcmu_timer_of_match);
                if (!np)
+#endif
                        goto dt_fail;
 
                tmp_base = of_iomap(np, 0);
index cf4687ee2a7bafe581cc32bd67dfa6ebfcff275e..cd8ea3588f93d3db2b557def5d0e844cf101b380 100644 (file)
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
                .length         = VERSATILE_PCI_CFG_BASE_SIZE,
                .type           = MT_DEVICE
-       },
-#if 0
-       {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE0,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
        }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE1,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
-       }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE2,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
-               .length         = SZ_16M,
+               .virtual        =  (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
+               .length         = IO_SPACE_LIMIT,
                .type           = MT_DEVICE
        },
 #endif
-#endif
 };
 
 void __init versatile_map_io(void)
index 4d4973dd8fba88f5542c6898390f940ae9f8399e..408e58da46c641a81098622a028956f38e4e4253 100644 (file)
@@ -29,8 +29,9 @@
  */
 #define VERSATILE_PCI_VIRT_BASE                (void __iomem *)0xe8000000ul
 #define VERSATILE_PCI_CFG_VIRT_BASE    (void __iomem *)0xe9000000ul
+#define VERSATILE_PCI_VIRT_MEM_BASE0   (void __iomem *)PCIO_BASE
 
-/* macro to get at IO space when running virtually */
+/* macro to get at MMIO space when running virtually */
 #define IO_ADDRESS(x)          (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
 
 #define __io_address(n)                ((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644 (file)
index 0000000..0406513
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  arch/arm/mach-versatile/include/mach/io.h
+ *
+ *  Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define PCIO_BASE      0xeb000000ul
+
+#define __io(a)                ((a) + PCIO_BASE)
+
+#endif
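
The new mach/io.h puts the PCI I/O port window at PCIO_BASE, which is also what VERSATILE_PCI_VIRT_MEM_BASE0 expands to in the reworked map_desc entry above (length IO_SPACE_LIMIT), so __io(port) always resolves inside that static mapping. A small stand-alone arithmetic check, plain user-space C and purely illustrative:

#include <stdio.h>

#define PCIO_BASE	0xeb000000ul		/* same value as in the new header */
#define __io(a)		((a) + PCIO_BASE)

int main(void)
{
	unsigned long port = 0x3f8;	/* an arbitrary example port number */

	/* 0x3f8 + 0xeb000000 = 0xeb0003f8, inside the mapped I/O window */
	printf("port 0x%lx -> virtual 0x%lx\n", port, __io(port));
	return 0;
}
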
index 15c6a00000ec4604f7f491c2c11fdebccbc6982f..e95bf84cc837550650ccc053e5afc72486d6682a 100644 (file)
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = {
        .write  = versatile_write_config,
 };
 
+static struct resource io_port = {
+       .name   = "PCI",
+       .start  = 0,
+       .end    = IO_SPACE_LIMIT,
+       .flags  = IORESOURCE_IO,
+};
+
 static struct resource io_mem = {
        .name   = "PCI I/O space",
        .start  = VERSATILE_PCI_MEM_BASE0,
        .end    = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
-       .flags  = IORESOURCE_IO,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource non_mem = {
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
                       "memory region (%d)\n", ret);
                goto out;
        }
+       ret = request_resource(&ioport_resource, &io_port);
+       if (ret) {
+               printk(KERN_ERR "PCI: unable to allocate I/O "
+                      "port region (%d)\n", ret);
+               goto out;
+       }
        ret = request_resource(&iomem_resource, &non_mem);
        if (ret) {
                printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
         * the mem resource for this bus
         * the prefetch mem resource for this bus
         */
-       pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+       pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
        pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
 
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
 
        if (nr == 0) {
                sys->mem_offset = 0;
+               sys->io_offset = 0;
                ret = pci_versatile_setup_resources(sys);
                if (ret < 0) {
                        printk("pci_versatile_setup: resources... oops?\n");
@@ -325,7 +339,6 @@ void __init pci_versatile_preinit(void)
 static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq;
-       int devslot = PCI_SLOT(dev->devfn);
 
        /* slot,  pin,  irq
         *  24     1     27
index 04dd092211b893271fc65df344ef42adb1e3f754..fde26adaef32d964a539a12a97f4596af27b1fe1 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/ata_platform.h>
 #include <linux/smsc911x.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/usb/isp1760.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
 #include <asm/hardware/gic.h>
 #include <asm/hardware/timer-sp.h>
 #include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
 
 #include <mach/ct-ca9x4.h>
 #include <mach/motherboard.h>
index ea6b43154090a2af00cbb168c7ef2d3a0589b2b2..655878bcc96d265a2ec81f87112ba48d5b5a037e 100644 (file)
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+       if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
                return 0;
-#endif
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
        .vm_list        = LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
        struct page *page;
        void *ptr;
 
-       if (cpu_architecture() < CPU_ARCH_ARMv6)
+       if (!IS_ENABLED(CONFIG_CMA))
                return 0;
 
        ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (arch_is_coherent() || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (cpu_architecture() < CPU_ARCH_ARMv6)
+       else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else if (gfp & GFP_ATOMIC)
                addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
        if (arch_is_coherent() || nommu()) {
                __dma_free_buffer(page, size);
-       } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+       } else if (!IS_ENABLED(CONFIG_CMA)) {
                __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
@@ -1069,7 +1067,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
                return NULL;
 
        while (count) {
-               int j, order = __ffs(count);
+               int j, order = __fls(count);
 
                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
                while (!pages[i] && order)
@@ -1093,7 +1091,7 @@ error:
        while (--i)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
@@ -1108,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
        for (i = 0; i < count; i++)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
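
Two of the hunks above are worth a note: __iommu_alloc_buffer() now starts from the highest power-of-two order that fits the remaining page count (__fls) instead of the lowest set bit (__ffs), and the kfree()/vfree() choice becomes <= PAGE_SIZE, presumably so the boundary case is freed the same way it was allocated. A stand-alone illustration of the order arithmetic, using compiler builtins to mimic the kernel helpers:

#include <stdio.h>

static int kernel_ffs(unsigned long x)	/* like __ffs: index of lowest set bit */
{
	return __builtin_ctzl(x);
}

static int kernel_fls(unsigned long x)	/* like __fls: index of highest set bit */
{
	return (int)(8 * sizeof(long)) - 1 - __builtin_clzl(x);
}

int main(void)
{
	unsigned long count = 24;	/* say, 24 pages left to allocate */

	/* old code would start at order 3 (8 pages); the fix starts at
	 * order 4 (16 pages), the largest chunk that still fits in count */
	printf("__ffs(%lu) = %d, __fls(%lu) = %d\n",
	       count, kernel_ffs(count), count, kernel_fls(count));
	return 0;
}
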
index c21d06c7dd7ec0ae4bdf036b22b74983aa2e5c6f..f54d59219764bc314b1cd10c587ea76f87fd70a9 100644 (file)
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
index 93dc0c17cdcbddf0f5294be8ea6c022aa693ca1f..2e8a1efdf7b85f3443294b396ec5673496dd635d 100644 (file)
@@ -62,9 +62,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index e5dad60b558b468315294b2c8b95c70193b6f74d..cf4528d5177448fb79cb5dc6689c873c2dbddb4a 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
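
The comment block added above carries the reasoning; the check it boils down to is plain address arithmetic against the 2 MB Linux PMD. A stand-alone sketch with the usual non-LPAE constants (the addresses are invented):

#include <stdio.h>

#define SECTION_SIZE	0x00100000UL		/* 1 MB section */
#define PMD_SIZE	(2 * SECTION_SIZE)	/* 2 MB Linux PMD */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long start = 0xf8100000UL;	/* hypothetical static mapping */
	unsigned long end   = 0xf8300000UL;

	/* starts on the odd section of its PMD: the lower half is unused */
	if ((start & ~PMD_MASK) == SECTION_SIZE)
		printf("blocking gap at 0x%lx\n", start & PMD_MASK);

	/* ends on an odd section boundary: the upper half of that PMD is unused */
	if ((end & ~PMD_MASK) == SECTION_SIZE)
		printf("blocking gap at 0x%lx\n", end);

	return 0;
}
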
index 62135849f48b75de671a045125cd2a670cb5c59c..c641fb6850170be36e7e47846a0fe4562af4fc52 100644 (file)
@@ -762,6 +762,11 @@ b_epilogue:
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
+               case BPF_S_ANC_ALU_XOR_X:
+                       /* A ^= X */
+                       update_on_xread(ctx);
+                       emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+                       break;
                case BPF_S_ANC_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
index 99ae5e3f46d2d7b29a755be7935178626aa471fe..7fa2f7d3cb90a4329f2f844b6555a9d81a42f3a8 100644 (file)
@@ -68,6 +68,8 @@
 #define ARM_INST_CMP_R         0x01500000
 #define ARM_INST_CMP_I         0x03500000
 
+#define ARM_INST_EOR_R         0x00200000
+
 #define ARM_INST_LDRB_I                0x05d00000
 #define ARM_INST_LDRB_R                0x07d00000
 #define ARM_INST_LDRH_I                0x01d000b0
 #define ARM_CMP_R(rn, rm)      _AL3_R(ARM_INST_CMP, 0, rn, rm)
 #define ARM_CMP_I(rn, imm)     _AL3_I(ARM_INST_CMP, 0, rn, imm)
 
+#define ARM_EOR_R(rd, rn, rm)  _AL3_R(ARM_INST_EOR, rd, rn, rm)
+
 #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
                                 | (off))
 #define ARM_LDRB_I(rt, rn, off)        (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
index 9129c9e7d532b2a08f2f464e489aeabad5f1badf..88726f4dbbfa60b774e942176056c39f0b31fc66 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/time.h>
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init epit_timer_init(void __iomem *base, int irq)
 {
+       struct clk *timer_clk;
+
+       timer_clk = clk_get_sys("imx-epit.0", NULL);
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX epit: unable to get clk\n");
+               return;
+       }
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
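
The EPIT setup now looks its clock up by name rather than taking it as an argument, so the SoC clock code has to provide a clkdev entry keyed on the "imx-epit.0" device id for clk_get_sys() above to succeed. A minimal, illustrative sketch of that registration (the clock handle name is made up):

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>

static void __init example_register_epit_lookup(struct clk *epit1_clk)
{
	/* con_id NULL + dev_id "imx-epit.0" is what clk_get_sys() matches on */
	clk_register_clkdev(epit1_clk, NULL, "imx-epit.0");
}
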
index cf663d84e7c1d1397291e4667c526087c3aaca56..e429ca1b814a179522bdf2bc9bb9e514ac136efc 100644 (file)
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void);
 extern void imx51_soc_init(void);
 extern void imx53_soc_init(void);
 extern void imx51_init_late(void);
-extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
-extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
+extern void epit_timer_init(void __iomem *base, int irq);
+extern void mxc_timer_init(void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
 extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
 extern int mx25_clocks_init(void);
index 7ded6f1f74bc63f0e701d4d2101df2e0ca003cf8..3c080a32dbf580d7669cc84adac7e5c510d76f6b 100644 (file)
@@ -23,6 +23,7 @@
 #ifndef __MACH_MX2_CAM_H_
 #define __MACH_MX2_CAM_H_
 
+#define MX2_CAMERA_SWAP16              (1 << 0)
 #define MX2_CAMERA_EXT_VSYNC           (1 << 1)
 #define MX2_CAMERA_CCIR                        (1 << 2)
 #define MX2_CAMERA_CCIR_INTERLACE      (1 << 3)
@@ -30,6 +31,7 @@
 #define MX2_CAMERA_GATED_CLOCK         (1 << 5)
 #define MX2_CAMERA_INV_DATA            (1 << 6)
 #define MX2_CAMERA_PCLK_SAMPLE_RISING  (1 << 7)
+#define MX2_CAMERA_PACK_DIR_MSB                (1 << 8)
 
 /**
  * struct mx2_camera_platform_data - optional platform data for mx2_camera
index 99f958ca6cb8c34ac650896418d1dc8e1a4cd247..00e8e659e66762104fc487cada9bbd8e81968df8 100644 (file)
@@ -58,6 +58,7 @@
 /* MX31, MX35, MX25, MX5 */
 #define V2_TCTL_WAITEN         (1 << 3) /* Wait enable mode */
 #define V2_TCTL_CLK_IPG                (1 << 6)
+#define V2_TCTL_CLK_PER                (2 << 6)
 #define V2_TCTL_FRR            (1 << 9)
 #define V2_IR                  0x0c
 #define V2_TSTAT               0x08
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init mxc_timer_init(void __iomem *base, int irq)
 {
        uint32_t tctl_val;
+       struct clk *timer_clk;
        struct clk *timer_ipg_clk;
 
-       if (!timer_clk) {
-               timer_clk = clk_get_sys("imx-gpt.0", "per");
-               if (IS_ERR(timer_clk)) {
-                       pr_err("i.MX timer: unable to get clk\n");
-                       return;
-               }
-
-               timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
-               if (!IS_ERR(timer_ipg_clk))
-                       clk_prepare_enable(timer_ipg_clk);
+       timer_clk = clk_get_sys("imx-gpt.0", "per");
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX timer: unable to get clk\n");
+               return;
        }
 
+       timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+       if (!IS_ERR(timer_ipg_clk))
+               clk_prepare_enable(timer_ipg_clk);
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
        __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
 
        if (timer_is_v2())
-               tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+               tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        else
                tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
 
index 62ec5c452792706407922b5ea0173009f6247622..706b7e29397f5ebfc87a5de092eb4fd330a635ba 100644 (file)
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
        struct clk *c;
        struct clk *pa;
 
+       mutex_lock(&clocks_mutex);
        seq_printf(s, "%-30s %-30s %-10s %s\n",
                "clock-name", "parent-name", "rate", "use-count");
 
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
                seq_printf(s, "%-30s %-30s %-10lu %d\n",
                        c->name, pa ? pa->name : "none", c->rate, c->usecount);
        }
+       mutex_unlock(&clocks_mutex);
 
        return 0;
 }
index 297245dba66e4c286a73f08b90e39aa062739155..de6c0a08f4615d6fba3f07b316285f9a9b47095a 100644 (file)
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335)
  * cpu_is_omap2423():  True for OMAP2423
  * cpu_is_omap2430():  True for OMAP2430
  * cpu_is_omap3430():  True for OMAP3430
- * cpu_is_omap3505():  True for OMAP3505
- * cpu_is_omap3517():  True for OMAP3517
  */
 #define GET_OMAP_TYPE  ((omap_rev() >> 16) & 0xffff)
 
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422)
 IS_OMAP_TYPE(2423, 0x2423)
 IS_OMAP_TYPE(2430, 0x2430)
 IS_OMAP_TYPE(3430, 0x3430)
-IS_OMAP_TYPE(3505, 0x3517)
-IS_OMAP_TYPE(3517, 0x3517)
 
 #define cpu_is_omap310()               0
 #define cpu_is_omap730()               0
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define cpu_is_omap2422()              0
 #define cpu_is_omap2423()              0
 #define cpu_is_omap2430()              0
-#define cpu_is_omap3503()              0
-#define cpu_is_omap3515()              0
-#define cpu_is_omap3525()              0
-#define cpu_is_omap3530()              0
-#define cpu_is_omap3505()              0
-#define cpu_is_omap3517()              0
 #define cpu_is_omap3430()              0
 #define cpu_is_omap3630()              0
 
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 
 #if defined(CONFIG_ARCH_OMAP3)
 # undef cpu_is_omap3430
-# undef cpu_is_omap3503
-# undef cpu_is_omap3515
-# undef cpu_is_omap3525
-# undef cpu_is_omap3530
-# undef cpu_is_omap3505
-# undef cpu_is_omap3517
 # undef cpu_is_ti81xx
 # undef cpu_is_ti816x
 # undef cpu_is_ti814x
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 # undef cpu_is_am33xx
 # undef cpu_is_am335x
 # define cpu_is_omap3430()             is_omap3430()
-# define cpu_is_omap3503()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (!omap3_has_sgx()))
-# define cpu_is_omap3515()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (omap3_has_sgx()))
-# define cpu_is_omap3525()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_sgx()) &&   \
-                                               (omap3_has_iva()))
-# define cpu_is_omap3530()             (cpu_is_omap3430())
-# define cpu_is_omap3517()             is_omap3517()
-# define cpu_is_omap3505()             (cpu_is_omap3517() &&           \
-                                               !omap3_has_sgx())
 # undef cpu_is_omap3630
 # define cpu_is_omap3630()             is_omap363x()
 # define cpu_is_ti81xx()               is_ti81xx()
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP3630_REV_ES1_1     (OMAP363X_CLASS | (0x1 << 8))
 #define OMAP3630_REV_ES1_2     (OMAP363X_CLASS | (0x2 << 8))
 
-#define OMAP3517_CLASS         0x35170034
-#define OMAP3517_REV_ES1_0     OMAP3517_CLASS
-#define OMAP3517_REV_ES1_1     (OMAP3517_CLASS | (0x1 << 8))
-
 #define TI816X_CLASS           0x81600034
 #define TI8168_REV_ES1_0       TI816X_CLASS
 #define TI8168_REV_ES1_1       (TI816X_CLASS | (0x1 << 8))
index 1527929b445a44a75ee8433fb26afce169048ae1..f37764a36072e4b5cbdd632e0cc6369f9de5e6fc 100644 (file)
@@ -92,6 +92,8 @@ enum omap_ecc {
        OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
                /* 1-bit ecc: stored at beginning of spare area as romcode */
        OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
+       OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
+       OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
 };
 
 /*
@@ -157,4 +159,13 @@ extern int gpmc_nand_write(int cs, int cmd, int wval);
 
 int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size);
 int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code);
+
+#ifdef CONFIG_ARCH_OMAP3
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors);
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+                         int nerrors);
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc);
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc);
+#endif /* CONFIG_ARCH_OMAP3 */
+
 #endif
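
The new declarations give NAND setup code a BCH4/BCH8 path next to the existing Hamming helpers. A hedged sketch of the expected calling order; the parameter meanings are assumed to mirror gpmc_enable_hwecc(), and none of this is taken from the actual driver:

/*
 * Illustration only.  Assumes the GPMC NAND header patched above is
 * included; cs/mode/dev_width are passed through untouched because their
 * exact encoding is not shown in this diff.
 */
#ifdef CONFIG_ARCH_OMAP3
static int example_bch8_ecc(int cs, int mode, int dev_width,
			    const u_char *data, u_char *ecc)
{
	int err;

	/* one-time setup: one sector per transfer, correct up to 8 errors */
	err = gpmc_init_hwecc_bch(cs, 1, 8);
	if (err)
		return err;

	/* arm the engine before the page operation */
	err = gpmc_enable_hwecc_bch(cs, mode, dev_width, 1, 8);
	if (err)
		return err;

	/* fetch the BCH8 syndrome bytes computed by the GPMC */
	return gpmc_calculate_ecc_bch8(cs, data, ecc);
}
#endif
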
index a7754a886d428af5599c00c4fefdcb4d2d058d12..5493bd95da5ee9f6988bb386a84151511a30b931 100644 (file)
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data {
 extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
                                        int is_closed);
 
-#if    defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-       defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
 void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                                int nr_controllers);
 void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
 }
-
 #endif
 
 extern int omap_msdi_reset(struct omap_hwmod *oh);
index 61fd837624a8532dc38e13a078d1d83941afc1d8..c1793786aea989de24668d31529f1d22eb1e7012 100644 (file)
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase)
  * Watchdog
  ****************************************************************************/
 static struct resource orion_wdt_resource =
-               DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
+               DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
 
 static struct platform_device orion_wdt_device = {
        .name           = "orion_wdt",
index 58b79809d20cd47a6a0579f91d49084d027f73e7..584c9bf8ed2d0feed3894fa1b672f26ab70365a5 100644 (file)
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = {
        { "pxa25x-nssp",        PXA25x_NSSP },
        { "pxa27x-ssp",         PXA27x_SSP },
        { "pxa168-ssp",         PXA168_SSP },
+       { "pxa910-ssp",         PXA910_SSP },
        { },
 };
 
index 33ecd0c9f0c3ecfdc63aaf44cab40a8deabd5f0c..b1e05ccff3acdb2ff4e5585ea1fb4947ebd18f28 100644 (file)
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
                return -EINVAL;
        }
 
-       if (client->is_ts && adc->ts_pend)
-               return -EAGAIN;
-
        spin_lock_irqsave(&adc->lock, flags);
 
+       if (client->is_ts && adc->ts_pend) {
+               spin_unlock_irqrestore(&adc->lock, flags);
+               return -EAGAIN;
+       }
+
        client->channel = channel;
        client->nr_samples = nr_samples;
 
index 1d214cb9d77050c921dc7e69f8b165c2eb89ee7e..6303974c2ee06af0e81a4f60437f0fac86d2f4f6 100644 (file)
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
 #ifdef CONFIG_CPU_S3C2440
 static struct resource s3c_camif_resource[] = {
        [0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
-       [1] = DEFINE_RES_IRQ(IRQ_CAM),
+       [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+       [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
 };
 
 struct platform_device s3c_device_camif = {
index 0fedf47fa502482a117dc9427d5067d11d8e3a3d..536002ff2ab8d665993b1f554a90ac74642917ad 100644 (file)
 
 /**
  * struct s3c_fb_pd_win - per window setup data
- * @win_mode: The display parameters to initialise (not for window 0)
+ * @xres     : The window X size.
+ * @yres     : The window Y size.
  * @virtual_x: The virtual X size.
  * @virtual_y: The virtual Y size.
  */
 struct s3c_fb_pd_win {
-       struct fb_videomode     win_mode;
-
        unsigned short          default_bpp;
        unsigned short          max_bpp;
+       unsigned short          xres;
+       unsigned short          yres;
        unsigned short          virtual_x;
        unsigned short          virtual_y;
 };
@@ -45,6 +46,7 @@ struct s3c_fb_pd_win {
  * @default_win: default window layer number to be used for UI layer.
  * @vidcon0: The base vidcon0 values to control the panel data format.
  * @vidcon1: The base vidcon1 values to control the panel data output.
+ * @vtiming: Video timing when connected to a RGB type panel.
  * @win: The setup data for each hardware window, or NULL for unused.
  * @display_mode: The LCD output display mode.
  *
@@ -58,8 +60,7 @@ struct s3c_fb_platdata {
        void    (*setup_gpio)(void);
 
        struct s3c_fb_pd_win    *win[S3C_FB_MAX_WIN];
-
-       u32                      default_win;
+       struct fb_videomode     *vtiming;
 
        u32                      vidcon0;
        u32                      vidcon1;
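
With win_mode gone, the per-window data keeps only the geometry (xres/yres plus the virtual sizes) and the shared RGB timing moves into the single vtiming pointer. A hedged sketch of board data shaped for the reworked structures; every number is invented and the plat/fb.h include path is assumed:

#include <linux/fb.h>
#include <linux/init.h>
#include <plat/fb.h>		/* assumed location of the structs above */

static struct s3c_fb_pd_win example_fb_win0 = {
	.xres		= 480,
	.yres		= 800,
	.virtual_x	= 480,
	.virtual_y	= 1600,		/* double buffered */
	.default_bpp	= 24,
	.max_bpp	= 32,
};

static struct fb_videomode example_fb_timing = {
	.xres		= 480,
	.yres		= 800,
	.left_margin	= 9,
	.right_margin	= 9,
	.upper_margin	= 5,
	.lower_margin	= 5,
	.hsync_len	= 2,
	.vsync_len	= 2,
};

static struct s3c_fb_platdata example_fb_pdata __initdata = {
	.win[0]		= &example_fb_win0,
	.vtiming	= &example_fb_timing,
	.vidcon0	= 0,	/* panel format bits, board specific */
	.vidcon1	= 0,	/* sync polarity bits, board specific */
};
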
index 7d048759b77244a1aee05507bc3813f9b55e0784..c0c70a895ca832e865f8c0b820e0fd9c85012cf2 100644 (file)
@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG    S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC                S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART                (0x50000000)
 #define S3C24XX_PA_UART                S3C2410_PA_UART
index de2b5bdc5ebd860a3e65020c6c63dfc75fb97dbb..7178e338e25ed8e7981e9b134dd7508aa8f4f128 100644 (file)
@@ -24,6 +24,9 @@ extern void s3c2416_init_clocks(int xtal);
 extern  int s3c2416_baseclk_add(void);
 
 extern void s3c2416_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c2416_irq_syscore_ops;
+
 #else
 #define s3c2416_init_clocks NULL
 #define s3c2416_init_uarts NULL
index f19aff19205c3d4090c6d239f574c6d788ed8af6..bc4db9b04e36e101de8885e6285e23baf60f9b89 100644 (file)
@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       if (s3c2410_wdtclk)
+       if (!IS_ERR(s3c2410_wdtclk))
                clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
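
The watchdog reset path now tests the cached clock with IS_ERR() instead of a NULL check, presumably because the handle is stored straight from clk_get(), which reports failure as an ERR_PTR() rather than NULL. The reduced idiom, with an illustrative connection id:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *example_wdtclk;

static void example_arm_watchdog_clock(void)
{
	example_wdtclk = clk_get(NULL, "watchdog");
	if (!IS_ERR(example_wdtclk))
		clk_enable(example_wdtclk);
}
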
index 031a61899beffb07843c198a3237c385e5e2100a..48a1599110372ffb77fcf8e33e89449e4a40b39d 100644 (file)
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
 struct clk clk_xusbxti = {
        .name           = "xusbxti",
        .id             = -1,
+       .rate           = 24000000,
 };
 
 struct clk s5p_clk_27m = {
index ab3de721c5db5756618d708159d4bf3a1865b6ee..75b05ad0fbad4d6e2b259ceaa59730bab611f71f 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header for spear platform
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index e14a3e4932f98ad264955bdf5a01969018eda1f8..2bc6b54460a80245db72a745e75de6237ae11405 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 03ed8b585dcf7a5cf89d5b28304bf0d7e18b07be..88a7fbd247936e1f3f59817f7cd1a82e54b67b23 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 914d09dd50fdfa71c1e9a505b37987771e5070fe..ef95e5b780bd04d303748748cb0763e687ffedfb 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6dd455bafdfdaf90bf4f13f8a6cc8572a763135c..2ce6cb17a98b10aba011fcb99047a49a97da5d7d 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a56a067717c112d972ac2f07e88cd444c34f5af3..12cf27f935f97372854cc1a0bd2c41251971499f 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ea0a61302b7ef56ff1650d2758dbe449d34deac9..4f990115b1bd7070c3092f3b768a7cce7e6f94a0 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific restart functions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 961fb7261243faf87baf47ce6ca420fc19b30d02..853e891e1184427212c3c8c16f3b1a060884e369 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer source file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 74667bfc88cc7676e4c43332b40705908c747021..9ba9e749b3f34d7c2760d1d9784ff9ede9528ef3 100644 (file)
@@ -17,9 +17,6 @@
 typedef unsigned short  __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 169268c40ae2552df9430128e4ae6e6964249989..df28841813139f658a3511daaacb5aa14012dd80 100644 (file)
@@ -281,7 +281,7 @@ syscall_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    1b
 
-2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
+2:     mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    3f
        unmask_interrupts
@@ -587,7 +587,7 @@ fault_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work
 
-1:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+1:     mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    2f
        unmask_interrupts
index ae386c304beefe153fb162e2b6909edc84dd5ef4..d552a854dacccbbc8b5f8131bbdaa2203ffb74fa 100644 (file)
@@ -22,8 +22,6 @@
 #include <asm/ucontext.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                               struct pt_regs *regs)
 {
@@ -89,7 +87,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -224,30 +221,27 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
 
 static inline void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs, int syscall)
+             struct pt_regs *regs, int syscall)
 {
        int ret;
 
        /*
         * Set up the stack frame
         */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+       ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
 
        /*
         * Check that the resulting registers are sane
         */
        ret |= !valid_user_regs(regs);
 
-       if (ret != 0) {
-               force_sigsegv(sig, current);
-               return;
-       }
-
        /*
         * Block the signal if we were successful.
         */
-       block_sigmask(ka, sig);
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
+       if (ret != 0)
+               force_sigsegv(sig, current);
+       else
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -255,7 +249,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * doesn't want to handle. Thus you cannot kill init even with a
  * SIGKILL even by mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
+static void do_signal(struct pt_regs *regs, int syscall)
 {
        siginfo_t info;
        int signr;
@@ -267,12 +261,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
         * without doing anything if so.
         */
        if (!user_mode(regs))
-               return 0;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else if (!oldset)
-               oldset = &current->blocked;
+               return;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (syscall) {
@@ -297,15 +286,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
 
        if (signr == 0) {
                /* No signal to deliver -- put the saved sigmask back */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
-               return 0;
+               restore_saved_sigmask();
+               return;
        }
 
-       handle_signal(signr, &ka, &info, oldset, regs, syscall);
-       return 1;
+       handle_signal(signr, &ka, &info, regs, syscall);
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
@@ -315,13 +300,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
        if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
                syscall = 1;
 
-       if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
-               do_signal(regs, &current->blocked, syscall);
+       if (ti->flags & _TIF_SIGPENDING)
+               do_signal(regs, syscall);
 
        if (ti->flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
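
The same conversion repeats for the architectures below: the per-arch _BLOCKABLE masks and the TIF_RESTORE_SIGMASK juggling give way to the generic sigmask_to_save(), signal_delivered() and restore_saved_sigmask() helpers, so each signal.c shrinks to a common shape. A condensed sketch of that shape, not any one architecture's exact code:

#include <linux/sched.h>
#include <linux/signal.h>

/* each architecture keeps its own frame builder; this is only a stand-in */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs);

static void example_handle_signal(int sig, siginfo_t *info,
				  struct k_sigaction *ka, struct pt_regs *regs)
{
	/* the saved sigmask now comes from the generic helper */
	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
		force_sigsegv(sig, current);
	else
		signal_delivered(sig, info, ka, regs, 0);
}

static void example_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		example_handle_signal(signr, &info, &ka, regs);
		return;
	}

	/* no signal: the generic helper puts any saved sigmask back */
	restore_saved_sigmask();
}
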
index 41bc1875c4d7fd367bbbea5432b9332a2821557f..1bd3436db6a7b7d080bdf4bcb1a09db621fde1b0 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned int __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 02560fd8a12182f019802e2f7b4c392110c2eb66..53ad10005ae37db4fbf6c3d6cb21eee6485b8db6 100644 (file)
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
                                           TIF_NEED_RESCHED */
 #define TIF_MEMDIE             4       /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_FREEZE             6       /* is freezing for suspend */
 #define TIF_IRQ_SYNC           7       /* sync pipeline stage */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE            (1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC          (1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1<<TIF_SINGLESTEP)
index 2e3994b20169773f7c92c7369ad0cbc0944f54a8..62bcea7dcc6dff30bdaa5a4793ab336aef5ed688 100644 (file)
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
        unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-       if (current->rt.nr_cpus_allowed == num_possible_cpus())
+       if (current->nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
index e5bbc1a5edc242dae7221168f771bbad704872d3..6682b73a8523f789bf2e3fa722f068f1ef4ebf79 100644 (file)
@@ -19,8 +19,6 @@
 #include <asm/fixed_code.h>
 #include <asm/syscall.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* Location of the trace bit in SYSCFG. */
 #define TRACE_BITS 0x0001
 
@@ -98,7 +96,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -190,17 +187,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
        err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
        if (err)
-               goto give_sigsegv;
+               return -EFAULT;
 
        /* Set up registers for signal handler */
-       wrusp((unsigned long)frame);
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor *) ka->sa.sa_handler;
-               __get_user(regs->pc, &funcptr->text);
-               __get_user(regs->p3, &funcptr->GOT);
+               u32 pc, p3;
+               err |= __get_user(pc, &funcptr->text);
+               err |= __get_user(p3, &funcptr->GOT);
+               if (err)
+                       return -EFAULT;
+               regs->pc = pc;
+               regs->p3 = p3;
        } else
                regs->pc = (unsigned long)ka->sa.sa_handler;
+       wrusp((unsigned long)frame);
        regs->rets = SIGRETURN_STUB;
 
        regs->r0 = frame->sig;
@@ -208,10 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
        regs->r2 = (unsigned long)(&frame->uc);
 
        return 0;
-
- give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
 }
 
 static inline void
@@ -247,24 +245,21 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
-       int ret;
-
        /* are we from a system call? to see pt_regs->orig_p0 */
        if (regs->orig_p0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, ka, 1);
 
        /* set up the stack frame */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
-
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               force_sigsegv(sig, current);
+       else 
+               signal_delivered(sig, info, ka, regs,
+                               test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -281,37 +276,16 @@ asmlinkage void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        current->thread.esp0 = (unsigned long)regs;
 
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                               test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
- no_signal:
        /* Did we come from a system call? */
        if (regs->orig_p0 >= 0)
                /* Restart the system call - no handlers present */
@@ -319,10 +293,7 @@ asmlinkage void do_signal(struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -330,14 +301,12 @@ asmlinkage void do_signal(struct pt_regs *regs)
  */
 asmlinkage void do_notify_resume(struct pt_regs *regs)
 {
-       if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 44bbf2f564cb7953d076a63b3940b400a2b330d8..f7f7a18abca915773ad291bd74a8ef192ffde333 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/hardirq.h>
 #include <linux/thread_info.h>
 #include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address)
 {
        struct task_struct *p;
        struct mm_struct *mm;
-       unsigned long flags, offset;
-       unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+       unsigned long offset;
        struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address)
         * mappings of all our processes and see if we can't be a whee
         * bit more specific
         */
-       write_lock_irqsave(&tasklist_lock, flags);
+       read_lock(&tasklist_lock);
        for_each_process(p) {
-               mm = (in_atomic ? p->mm : get_task_mm(p));
-               if (!mm)
-                       continue;
+               struct task_struct *t;
 
-               if (!down_read_trylock(&mm->mmap_sem)) {
-                       if (!in_atomic)
-                               mmput(mm);
+               t = find_lock_task_mm(p);
+               if (!t)
                        continue;
-               }
+
+               mm = t->mm;
+               if (!down_read_trylock(&mm->mmap_sem))
+                       goto __continue;
 
                for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
                        struct vm_area_struct *vma;
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address)
 
                        if (address >= vma->vm_start && address < vma->vm_end) {
                                char _tmpbuf[256];
-                               char *name = p->comm;
+                               char *name = t->comm;
                                struct file *file = vma->vm_file;
 
                                if (file) {
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address)
                                                name, vma->vm_start, vma->vm_end);
 
                                up_read(&mm->mmap_sem);
-                               if (!in_atomic)
-                                       mmput(mm);
+                               task_unlock(t);
 
                                if (buf[0] == '\0')
                                        sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address)
                }
 
                up_read(&mm->mmap_sem);
-               if (!in_atomic)
-                       mmput(mm);
+__continue:
+               task_unlock(t);
        }
 
        /*
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address)
        sprintf(buf, "/* kernel dynamic memory */");
 
 done:
-       write_unlock_irqrestore(&tasklist_lock, flags);
+       read_unlock(&tasklist_lock);
 }
 
 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
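
The rework above replaces the write_lock_irqsave(&tasklist_lock) plus get_task_mm() dance with read_lock(&tasklist_lock) and find_lock_task_mm(), which hands back a thread of the process with task_lock() held and a non-NULL ->mm, so the mm stays stable for the walk without taking an extra reference. A minimal sketch of the resulting locking skeleton, illustration only:

#include <linux/oom.h>		/* declares find_lock_task_mm(); the hunk above adds this include too */
#include <linux/sched.h>

static void example_walk_mms(void)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		t = find_lock_task_mm(p);	/* task_lock(t) held, t->mm != NULL */
		if (!t)
			continue;

		if (down_read_trylock(&t->mm->mmap_sem)) {
			/* ... inspect t->mm here, e.g. walk t->mm->mm_rb ... */
			up_read(&t->mm->mmap_sem);
		}

		task_unlock(t);
	}
	read_unlock(&tasklist_lock);
}
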
index f6ffd6f054c398201dfea4e21aaf4c3b6d044719..0b74218fdd3a89ec5ee00cf4e74c09ee4ddbc0ed 100644 (file)
@@ -248,8 +248,6 @@ static struct platform_device bfin_uart0_device = {
 
 #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition bfin_plat_nand_partitions[] = {
        {
         .name = "params(nand)",
@@ -289,7 +287,6 @@ static struct platform_nand_data bfin_plat_nand_data = {
        .chip = {
                 .nr_chips = 1,
                 .chip_delay = 30,
-                .part_probe_types = part_probes,
                 .partitions = bfin_plat_nand_partitions,
                 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
                 },
index 80aa2535e2c9f2be80c6f78d6a49c832f7e8be3d..04c2fbe41a7ff3e7193532b1e809b8dbcb7474ee 100644 (file)
@@ -711,8 +711,6 @@ ENTRY(_system_call)
        jump .Lresume_userspace_1;
 
 .Lsyscall_sigpending:
-       cc = BITTST(r7, TIF_RESTORE_SIGMASK);
-       if cc jump .Lsyscall_do_signals;
        cc = BITTST(r7, TIF_SIGPENDING);
        if cc jump .Lsyscall_do_signals;
        cc = BITTST(r7, TIF_NOTIFY_RESUME);
index cf37478c1169c59411bc6926142c020a6348ec26..3d8f3c22a94fa0adf7e66fe2a793a65326759990 100644 (file)
@@ -20,8 +20,6 @@
 #include <asm/cacheflush.h>
 
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Do a signal return, undo the signal stack.
  */
@@ -87,7 +85,6 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -248,10 +245,9 @@ do_restart:
 /*
  * handle the actual delivery of a signal to userspace
  */
-static int handle_signal(int sig,
+static void handle_signal(int sig,
                         siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs,
-                        int syscall)
+                        struct pt_regs *regs, int syscall)
 {
        int ret;
 
@@ -278,11 +274,9 @@ static int handle_signal(int sig,
        }
 
        /* Set up the stack frame */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               return;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -292,7 +286,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
        /* we want the common case to go fast, which is why we may in certain
@@ -300,25 +293,9 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset,
-                                 regs, syscall) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs, 0);
-               }
-
+               handle_signal(signr, &info, &ka, regs, syscall);
                return;
        }
 
@@ -343,10 +320,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -357,14 +331,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
                                 int syscall)
 {
        /* deal with pending signal delivery */
-       if (thread_info_flags & ((1 << TIF_SIGPENDING) |
-                                (1 << TIF_RESTORE_SIGMASK)))
+       if (thread_info_flags & (1 << TIF_SIGPENDING))
                do_signal(regs, syscall);
 
        if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index e16f8f297f61a0376f8b6b4497675a742ee9bf87..0bb477c13a4e08b6bd09ceee6e22e9a11ac2a6b0 100644 (file)
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */
 /* manipulate regs so that upon return, it will be re-executed */
 
@@ -176,7 +174,6 @@ asmlinkage int sys_sigreturn(long r10, long r11, long r12, long r13, long mof,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc))
@@ -212,7 +209,6 @@ asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -415,10 +411,11 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-static inline int handle_signal(int canrestart, unsigned long sig,
+static inline void handle_signal(int canrestart, unsigned long sig,
        siginfo_t *info, struct k_sigaction *ka,
-       sigset_t *oldset, struct pt_regs *regs)
+       struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -456,9 +453,7 @@ static inline int handle_signal(int canrestart, unsigned long sig,
                ret = setup_frame(sig, ka, oldset, regs);
 
        if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -478,7 +473,6 @@ void do_signal(int canrestart, struct pt_regs *regs)
        siginfo_t info;
        int signr;
         struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -489,23 +483,10 @@ void do_signal(int canrestart, struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(canrestart, signr, &info, &ka,
-                               oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(canrestart, signr, &info, &ka, regs);
                return;
        }
 
@@ -525,8 +506,5 @@ void do_signal(int canrestart, struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
index b338d8fc0c1241c12aa66495e474d16cd5d49f4a..b60d1b65a4267ef5cbdb14758058b5c9ca6707a0 100644 (file)
@@ -24,9 +24,6 @@
 
 extern unsigned long cris_signal_return_page;
 
-/* Flag to check if a signal is blockable. */
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * A syscall in CRIS is really a "break 13" instruction, which is 2
  * bytes. The registers is manipulated so upon return the instruction
@@ -167,7 +164,6 @@ sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
                                                 sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc))
@@ -208,7 +204,6 @@ sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -434,11 +429,12 @@ give_sigsegv:
 }
 
 /* Invoke a signal handler to, well, handle the signal. */
-static inline int
+static inline void
 handle_signal(int canrestart, unsigned long sig,
              siginfo_t *info, struct k_sigaction *ka,
-              sigset_t *oldset, struct pt_regs * regs)
+              struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Check if this got called from a system call. */
@@ -489,9 +485,7 @@ handle_signal(int canrestart, unsigned long sig,
                ret = setup_frame(sig, ka, oldset, regs);
 
        if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -511,7 +505,6 @@ do_signal(int canrestart, struct pt_regs *regs)
        int signr;
        siginfo_t info;
         struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * The common case should go fast, which is why this point is
@@ -521,25 +514,11 @@ do_signal(int canrestart, struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(canrestart, signr, &info, &ka,
-                               oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(canrestart, signr, &info, &ka, regs);
                return;
        }
 
@@ -560,10 +539,7 @@ do_signal(int canrestart, struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 asmlinkage void
index 234891c74e2bbe21d0aed4301a7552200829c101..ce4e517931514fb0f6d52a2ef1af35b39aa19ea0 100644 (file)
@@ -15,9 +15,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index d114ad3da9b15f95ccc5a68b736d5e80bae75af7..58d44ee1a71f70885aef328fdf6a6af32d6c7b58 100644 (file)
@@ -40,7 +40,5 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 3f34cb45fbb3fafd24edff901ca8b52d9941532d..fe512af74a5afbb57dbc1490214155cdbbeb220d 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 54ab13a0de415c1c76d2449626ad5d3ef92c3565..0ff03a33c81e8b193da81f690ce94c0f0c9ad82b 100644 (file)
@@ -94,8 +94,8 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE             17      /* is terminating due to OOM killer */
+#define TIF_POLLING_NRFLAG     6       /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE             7       /* is terminating due to OOM killer */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
@@ -105,8 +105,16 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 
-#define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
-#define _TIF_ALLWORK_MASK      0x0000FFFF      /* work to do on any return to u-space */
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK         \
+       (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK      (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
+
+#if _TIF_ALLWORK_MASK >= 0x2000
+#error "_TIF_ALLWORK_MASK won't fit in an ANDI now (see entry.S)"
+#endif
 
 /*
  * Thread-synchronous status.
index 5ba23f715ea5e7f0f3dfcc0cab6a860519e41ec6..7d5e000fd32e0e4d40c78c2fcec1806f17bb1286 100644 (file)
@@ -905,18 +905,19 @@ __syscall_call:
 __syscall_exit:
        LEDS            0x6300
 
-       sti             gr8,@(gr28,#REG_GR(8))  ; save return value
+       # keep current PSR in GR23
+       movsg           psr,gr23
 
-       # rebuild saved psr - execve will change it for init/main.c
        ldi             @(gr28,#REG_PSR),gr22
+
+       sti.p           gr8,@(gr28,#REG_GR(8))  ; save return value
+
+       # rebuild saved psr - execve will change it for init/main.c
        srli            gr22,#1,gr5
        andi.p          gr22,#~PSR_PS,gr22
        andi            gr5,#PSR_PS,gr5
        or              gr5,gr22,gr22
-       ori             gr22,#PSR_S,gr22
-
-       # keep current PSR in GR23
-       movsg           psr,gr23
+       ori.p           gr22,#PSR_S,gr22
 
        # make sure we don't miss an interrupt setting need_resched or sigpending between
        # sampling and the RETT
@@ -924,9 +925,7 @@ __syscall_exit:
        movgs           gr23,psr
 
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_ALLWORK_MASK),gr5
-       setlo           %lo(_TIF_ALLWORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_ALLWORK_MASK,gr0,icc0
        bne             icc0,#0,__syscall_exit_work
 
        # restore all registers and return
@@ -1111,9 +1110,7 @@ __entry_resume_userspace:
 __entry_return_from_user_interrupt:
        LEDS            0x6402
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_WORK_MASK),gr5
-       setlo           %lo(_TIF_WORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_WORK_MASK,gr0,icc0
        beq             icc0,#1,__entry_return_direct
 
 __entry_work_pending:
@@ -1133,9 +1130,7 @@ __entry_work_resched:
 
        LEDS            0x6401
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_WORK_MASK),gr5
-       setlo           %lo(_TIF_WORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_WORK_MASK,gr0,icc0
        beq             icc0,#1,__entry_return_direct
        andicc          gr4,#_TIF_NEED_RESCHED,gr0,icc0
        bne             icc0,#1,__entry_work_resched
@@ -1163,7 +1158,9 @@ __syscall_trace_entry:
        # perform syscall exit tracing
 __syscall_exit_work:
        LEDS            0x6340
-       andicc          gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+       andicc          gr22,#PSR_PS,gr0,icc1   ; don't handle on return to kernel mode
+       andicc.p        gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+       bne             icc1,#0,__entry_return_direct
        beq             icc0,#1,__entry_work_pending
 
        movsg           psr,gr23
index 8cf5dca01758f75e63041d2c81edf1d8542bbb0c..864c2f0d497bfa62800e4168cac94215bff4d128 100644 (file)
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
        unsigned long   text;
        unsigned long   GOT;
@@ -149,7 +147,6 @@ asmlinkage int sys_sigreturn(void)
            __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->sc, &gr8))
@@ -172,7 +169,6 @@ asmlinkage int sys_rt_sigreturn(void)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8))
@@ -426,9 +422,10 @@ give_sigsegv:
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, siginfo_t *info,
-                        struct k_sigaction *ka, sigset_t *oldset)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+                        struct k_sigaction *ka)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -460,11 +457,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        else
                ret = setup_frame(sig, ka, oldset);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (ret)
+               return;
 
+       signal_delivered(sig, info, ka, __frame,
+                                test_thread_flag(TIF_SINGLESTEP));
 } /* end handle_signal() */
 
 /*****************************************************************************/
@@ -477,44 +474,14 @@ static void do_signal(void)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if (!user_mode(__frame))
-               return;
-
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, __frame,
-                                                test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &info, &ka);
                return;
        }
 
-no_signal:
        /* Did we come from a system call? */
        if (__frame->syscallno != -1) {
                /* Restart the system call - no handlers present */
@@ -536,11 +503,7 @@ no_signal:
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
+       restore_saved_sigmask();
 } /* end do_signal() */
 
 /*****************************************************************************/
@@ -555,15 +518,13 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
                clear_thread_flag(TIF_SINGLESTEP);
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal();
 
        /* deal with notification on about to resume userspace execution */
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(__frame);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
 } /* end do_notify_resume() */
index a09230a08e02d201e4c2b214a0bd6cabc5a8c223..62ef17676b406274a76ddf5d3a51f7091a9ace97 100644 (file)
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
 #define        VMALLOC_END     0xffffffff
 
 #define arch_enter_lazy_cpu_mode()    do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
 #endif /* _H8300_PGTABLE_H */
index bc4c34efb1ad167ccafa90ee8796d605058572a0..91e62ba4c7b02e99cf73893ef04e6a8cf259025a 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 356068cd0879bdf145bd68f12857b75cfd95ab4c..8725d1ad427263895a6e5d904c179e2c1440bbe0 100644 (file)
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
        break;                                                  \
     default:                                                   \
        __gu_err = __get_user_bad();                            \
-       __gu_val = 0;                                           \
        break;                                                  \
     }                                                          \
     (x) = __gu_val;                                            \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
        return 0;
 }
 
+#define __clear_user   clear_user
+
 #endif /* _H8300_UACCESS_H */
index 68d651081bd3f00dcbc71767e86b1614a98c9ab8..d0b1607f2711f577ec194e12127c458a6edd18a2 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #if defined(__H8300H__)
 #define CPU "H8/300H"
@@ -54,7 +55,6 @@ unsigned long memory_end;
 
 char __initdata command_line[COMMAND_LINE_SIZE];
 
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
 extern int _ramstart, _ramend;
 extern char _target_name[];
 extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p)
            memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
 
-       init_mm.start_code = (unsigned long) &_stext;
-       init_mm.end_code = (unsigned long) &_etext;
-       init_mm.end_data = (unsigned long) &_edata;
+       init_mm.start_code = (unsigned long) _stext;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) 0; 
 
 #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
 
 #ifdef DEBUG
-       printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
-               "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
-               (int) &_sdata, (int) &_edata,
-               (int) &_sbss, (int) &_ebss);
-       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
-               "STACK=0x%06x-0x%06x\n",
-              (int) &_ebss, (int) memory_start,
-               (int) memory_start, (int) memory_end,
-               (int) memory_end, (int) &_ramend);
+       printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
+               "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
+               __bss_stop);
+       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
+               "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
+               memory_end, memory_end, &_ramend);
 #endif
 
 #ifdef CONFIG_DEFAULT_CMDLINE
index d4b0555d29047851d9a80d9ae020f30eecc1a370..5adaadaf92183c42703b9995e32e622dc926d958 100644 (file)
@@ -47,8 +47,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
@@ -186,7 +184,6 @@ asmlinkage int do_sigreturn(unsigned long __unused,...)
                              sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        
        if (restore_sigcontext(regs, &frame->sc, &er0))
@@ -211,7 +208,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused,...)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
@@ -412,8 +408,9 @@ give_sigsegv:
  */
 static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs * regs)
+             struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
        /* are we from a system call? */
        if (regs->orig_er0 >= 0) {
@@ -441,10 +438,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (!ret) {
-               block_sigmask(ka, sig);
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-       }
+       if (!ret)
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -452,12 +447,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -468,23 +462,14 @@ statis void do_signal(struct pt_regs *regs)
        if ((regs->ccr & 0x10))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        current->thread.esp0 = (unsigned long) regs;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &info, &ka, oldset, regs);
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
- no_signal:
        /* Did we come from a system call? */
        if (regs->orig_er0 >= 0) {
                /* Restart the system call - no handlers present */
@@ -501,8 +486,7 @@ statis void do_signal(struct pt_regs *regs)
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
@@ -513,7 +497,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 32263a138aa6ccf9a2a871a859254f2a18df03d9..e0f74191d55312fe4a8fa6548d95d34667604efe 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 
 #include <asm/io.h>
+#include <asm/irq_regs.h>
 #include <asm/timer.h>
 
 #define        TICK_SIZE (tick_nsec / 1000)
index 973369c32a957cd4a1780e922301268ae0c30e19..981e25094b1a403d2fa6505b7a0733fd6cfac3ec 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #undef DEBUG
 
@@ -123,7 +124,6 @@ void __init mem_init(void)
        int codek = 0, datak = 0, initk = 0;
        /* DAVIDM look at setup memory map generically with reserved area */
        unsigned long tmp;
-       extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
        extern unsigned long  _ramend, _ramstart;
        unsigned long len = &_ramend - &_ramstart;
        unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@ void __init mem_init(void)
        /* this will put all memory onto the freelists */
        totalram_pages = free_all_bootmem();
 
-       codek = (&_etext - &_stext) >> 10;
-       datak = (&_ebss - &_sdata) >> 10;
-       initk = (&__init_begin - &__init_end) >> 10;
+       codek = (_etext - _stext) >> 10;
+       datak = (__bss_stop - _sdata) >> 10;
+       initk = (__init_begin - __init_end) >> 10;
 
        tmp = nr_free_pages() << PAGE_SHIFT;
        printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@ free_initmem(void)
 {
 #ifdef CONFIG_RAMKERNEL
        unsigned long addr;
-       extern char __init_begin, __init_end;
 /*
  *     the following code should be cool even if these sections
  *     are not page aligned.
  */
-       addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+       addr = PAGE_ALIGN((unsigned long)(__init_begin));
        /* next to check that the page we free is not a partial page */
-       for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+       for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
-                       (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
-                       (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+                       (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
+                       (int)(PAGE_ALIGN((unsigned long)__init_begin)),
                        (int)(addr - PAGE_SIZE));
 #endif
 }
index 434866eb0f1cf36d639c5e14367458bf272522df..304b0808d07213f0853ee256d781a114f561a947 100644 (file)
@@ -31,8 +31,6 @@
 #include <asm/signal.h>
 #include <asm/vdso.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
        unsigned long tramp[2];
        struct siginfo info;
@@ -149,11 +147,9 @@ sigsegv:
 /*
  * Setup invocation of signal handler
  */
-static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
+                        struct pt_regs *regs)
 {
-       int rc;
-
        /*
         * If we're handling a signal that aborted a system call,
         * set up the error return value before adding the signal
@@ -186,15 +182,12 @@ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
         * Set up the stack frame; not doing the SA_SIGINFO thing.  We
         * only set up the rt_frame flavor.
         */
-       rc = setup_rt_frame(sig, ka, info, oldset, regs);
-
        /* If there was an error on setup, no signal was delivered. */
-       if (rc)
-               return rc;
-
-       block_sigmask(ka, sig);
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -209,34 +202,13 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
 
        if (signo > 0) {
-               sigset_t *oldset;
-
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-
-               if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) {
-                       /*
-                        * Successful delivery case.  The saved sigmask is
-                        * stored in the signal frame, and will be restored
-                        * by sigreturn.  We can clear the TIF flag.
-                        */
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signo, &info, &sigact, regs,
-                               test_thread_flag(TIF_SINGLESTEP));
-               }
+               handle_signal(signo, &info, &sigact, regs);
                return;
        }
 
-no_signal:
        /*
         * If we came from a system call, handle the restart.
         */
@@ -259,10 +231,7 @@ no_signal:
 
 no_restart:
        /* If there's no signal to deliver, put the saved sigmask back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -273,8 +242,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
@@ -303,7 +270,6 @@ asmlinkage int sys_rt_sigreturn(void)
        if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
index 7323ab9467ebae726473512588d2c8f41d0ceb40..99ee1d6510cfc98a7dc66128fca841af022b133f 100644 (file)
@@ -1,9 +1,6 @@
 #ifndef _ASM_IA64_POSIX_TYPES_H
 #define _ASM_IA64_POSIX_TYPES_H
 
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long  __kernel_sigset_t;      /* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
index 310d9734f02d11a2537311b6860a736ff5fa63a7..f7ee85378311a0f2d241a458640f697b4940a2a1 100644 (file)
@@ -141,7 +141,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index f00ba025375d5696d0070bfe640b6f26f554eebd..d7f558c1e7117bfff75a056d4fee9213c6a4b7fb 100644 (file)
@@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
        spin_unlock(&(x)->ctx_lock);
 }
 
-static inline unsigned long 
-pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
-{
-       return get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
 /* forward declaration */
 static const struct dentry_operations pfmfs_dentry_operations;
 
@@ -2333,8 +2327,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        down_write(&task->mm->mmap_sem);
 
        /* find some free area in address space, must have mmap sem held */
-       vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
-       if (vma->vm_start == 0UL) {
+       vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
+       if (IS_ERR_VALUE(vma->vm_start)) {
                DPRINT(("Cannot find unmapped area for size %ld\n", size));
                up_write(&task->mm->mmap_sem);
                goto error;
index 5e0e86ddb12f7801ec0a1f7e5579dae48168d5a4..dd6fc14497419dca929be82a44b55ce638bc4705 100644 (file)
@@ -199,8 +199,6 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(&scr->pt);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
        /* copy user rbs to kernel rbs */
index 7523501d3bc087bbb39a22573776ba365d131aeb..a199be1fe619bc12d00a9e87198b6f919a9f2c74 100644 (file)
@@ -30,7 +30,6 @@
 
 #define DEBUG_SIG      0
 #define STACK_ALIGN    16              /* minimal alignment for stack pointer */
-#define _BLOCKABLE     (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 #if _NSIG_WORDS > 1
 # define PUT_SIGSET(k,u)       __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
@@ -200,7 +199,6 @@ ia64_rt_sigreturn (struct sigscratch *scr)
        if (GET_SIGSET(&set, &sc->sc_mask))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(sc, scr))
@@ -415,18 +413,13 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
 }
 
 static long
-handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
+handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
               struct sigscratch *scr)
 {
-       if (!setup_frame(sig, ka, info, oldset, scr))
+       if (!setup_frame(sig, ka, info, sigmask_to_save(), scr))
                return 0;
 
-       block_sigmask(ka, sig);
-
-       /*
-        * Let tracing know that we've done the handler setup.
-        */
-       tracehook_signal_handler(sig, info, ka, &scr->pt,
+       signal_delivered(sig, info, ka, &scr->pt,
                                 test_thread_flag(TIF_SINGLESTEP));
 
        return 1;
@@ -440,7 +433,6 @@ void
 ia64_do_signal (struct sigscratch *scr, long in_syscall)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        long restart = in_syscall;
        long errno = scr->pt.r8;
@@ -453,11 +445,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
        if (!user_mode(&scr->pt))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        /*
         * This only loops in the rare cases of handle_signal() failing, in which case we
         * need to push through a forced SIGSEGV.
@@ -507,16 +494,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
                 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
                 * continue to iterate in this loop so we can deliver the SIGSEGV...
                 */
-               if (handle_signal(signr, &ka, &info, oldset, scr)) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               if (handle_signal(signr, &ka, &info, scr))
                        return;
-               }
        }
 
        /* Did we come from a system call? */
@@ -538,8 +517,5 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
index 609d50056a6c7bd9fba2d46b757960b3e8893388..d9439ef2f66187d9e864f91778b8a9022c41577c 100644 (file)
@@ -171,22 +171,9 @@ asmlinkage unsigned long
 ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
             unsigned long new_addr)
 {
-       extern unsigned long do_mremap (unsigned long addr,
-                                       unsigned long old_len,
-                                       unsigned long new_len,
-                                       unsigned long flags,
-                                       unsigned long new_addr);
-
-       down_write(&current->mm->mmap_sem);
-       {
-               addr = do_mremap(addr, old_len, new_len, flags, new_addr);
-       }
-       up_write(&current->mm->mmap_sem);
-
-       if (IS_ERR((void *) addr))
-               return addr;
-
-       force_successful_syscall_return();
+       addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
+       if (!IS_ERR((void *) addr))
+               force_successful_syscall_return();
        return addr;
 }
 
index 177716b1d61354a74a86ecfff0e08ba522660ec8..01729c2979ba2634f49f7c4403aab4e95652fead 100644 (file)
@@ -43,9 +43,9 @@ endif
 
 OBJCOPYFLAGS += -R .empty_zero_page
 
-suffix_$(CONFIG_KERNEL_GZIP)   = gz
-suffix_$(CONFIG_KERNEL_BZIP2)  = bz2
-suffix_$(CONFIG_KERNEL_LZMA)   = lzma
+suffix-$(CONFIG_KERNEL_GZIP)   = gz
+suffix-$(CONFIG_KERNEL_BZIP2)  = bz2
+suffix-$(CONFIG_KERNEL_LZMA)   = lzma
 
 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
        $(call if_changed,ld)
index 370d60881977db4370f1686bee5d3b59a19eba69..28a09529f206915fd00633bf846dde48342b8b36 100644 (file)
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
 static unsigned long free_mem_end_ptr;
 
 #ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
 {
        char *ss = s;
 
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
 #endif
 
 #ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+       char *d = dest;
+       const char *s = src;
+       while (n--)
+               *d++ = *s++;
+
+       return dest;
+}
+
 #define BOOT_HEAP_SIZE             0x10000
 #include "../../../../lib/decompress_inflate.c"
 #endif
index 0195850e1f88698b7a6c29ffd8b807b9e38a5b53..236de26a409b3f9a3d85df67129d88a488d025d1 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 527527584dd096dc2d7c2cc55bab64e83df0fee6..4313aa62b51b76746b4b800a592b3f55899c395e 100644 (file)
@@ -113,9 +113,6 @@ struct pt_regs {
 
 #define PTRACE_OLDSETOPTIONS   21
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD  0x00000001
-
 #ifdef __KERNEL__
 
 #include <asm/m32r.h>          /* M32R_PSW_BSM, M32R_PSW_BPM */
index 4c03361537aa6b49f3e2ba144afd667cd6b75307..51f5e9aa49016fdce8112eb72083c0b167f21df8 100644 (file)
@@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               return -EIO;
+               return;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               return -EIO;
+               return;
 
        if (embed_debug_trap(child, next_pc))
-               return -EIO;
+               return;
 
        invalidate_cache();
-       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index f54d96993ea187686f7264fdab3867d5ffc821ca..d0f60b97bbc5d82b1855e73145ce0b809124200f 100644 (file)
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -111,7 +109,6 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
@@ -267,9 +264,9 @@ static int prev_insn(struct pt_regs *regs)
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
@@ -289,16 +286,15 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
                                if (prev_insn(regs) < 0)
-                                       return -EFAULT;
+                                       return;
                }
        }
 
        /* Set up the stack frame */
-       if (setup_rt_frame(sig, ka, info, oldset, regs))
-               return -EFAULT;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs))
+               return;
 
-       block_sigmask(ka, sig);
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -311,7 +307,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -322,14 +317,6 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze()) 
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Re-enable any watchpoints before delivering the
@@ -339,13 +326,11 @@ static void do_signal(struct pt_regs *regs)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+               handle_signal(signr, &ka, &info, regs);
 
                return;
        }
 
- no_signal:
        /* Did we come from a system call? */
        if (regs->syscall_nr >= 0) {
                /* Restart the system call - no handlers present */
@@ -360,10 +345,7 @@ static void do_signal(struct pt_regs *regs)
                        prev_insn(regs);
                }
        }
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -383,8 +365,6 @@ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
        clear_thread_flag(TIF_IRET);
index cac5b6be572a8b83c8331ba5aa83bf84f3f83159..1471201282605485d05fe0b0afd024988509d55c 100644 (file)
@@ -7,6 +7,8 @@ config M68K
        select GENERIC_IRQ_SHOW
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
        select GENERIC_CPU_DEVICES
+       select GENERIC_STRNCPY_FROM_USER if MMU
+       select GENERIC_STRNLEN_USER if MMU
        select FPU if MMU
        select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
 
index 1a922fad76f753aa66e8d212828935861892865a..eafa2539a8ee79dbcc65ea929ab082c767d93029 100644 (file)
@@ -1,2 +1,4 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
+
+generic-y += word-at-a-time.h
index d63b99ff7ff7aa8638bc3e44a2bba47923c38a6c..497c31c803ff55d3e57981fb590392b4f608d686 100644 (file)
@@ -86,7 +86,7 @@
 /*
  *     QSPI module.
  */
-#define        MCFQSPI_IOBASE          (MCF_IPSBAR + 0x340)
+#define        MCFQSPI_BASE            (MCF_IPSBAR + 0x340)
 #define        MCFQSPI_SIZE            0x40
 
 #define        MCFQSPI_CS0             147
index 6373093be72bb049f37f071468c3b6c73d6daafe..cf4dbf70fdc73f116f95a83c698511fd7b5f4a62 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 9c80cd515b2069cab1a28b2b54a16f70d9a19028..472c891a4aeee4c1f1bc095b2e33b08840c5a6c2 100644 (file)
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define copy_from_user(to, from, n)    __copy_from_user(to, from, n)
 #define copy_to_user(to, from, n)      __copy_to_user(to, from, n)
 
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *src, long n);
+#define user_addr_max() \
+       (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 unsigned long __clear_user(void __user *to, unsigned long n);
 
 #define clear_user     __clear_user
 
-#define strlen_user(str) strnlen_user(str, 32767)
-
 #endif /* _M68K_UACCESS_H */
index 8b4a2222e65877dd35eb736c8ca25592f8695ad0..1bc10e62b9affa45f8938f1f0150431971bffcde 100644 (file)
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
        }
 }
 
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
 asmlinkage int syscall_trace_enter(void)
 {
        int ret = 0;
index d9f3d1900eed029a5044fb05845dceeccb261844..710a528b928b8580ac4281449586ee4507966db9 100644 (file)
@@ -51,8 +51,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #ifdef CONFIG_MMU
 
 /*
@@ -795,7 +793,6 @@ asmlinkage int do_sigreturn(unsigned long __unused)
                              sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, frame + 1))
@@ -820,7 +817,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (rt_restore_ucontext(regs, sw, &frame->uc))
@@ -1123,8 +1119,9 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
  */
 static void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int err;
        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
@@ -1140,14 +1137,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (err)
                return;
 
-       block_sigmask(ka, sig);
+       signal_delivered(sig, info, ka, regs, 0);
 
        if (test_thread_flag(TIF_DELAYED_TRACE)) {
                regs->sr &= ~0x8000;
                send_sig(SIGTRAP, current, 1);
        }
-
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 
 /*
@@ -1160,19 +1155,13 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        struct k_sigaction ka;
        int signr;
-       sigset_t *oldset;
 
        current->thread.esp0 = (unsigned long) regs;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
@@ -1182,10 +1171,7 @@ static void do_signal(struct pt_regs *regs)
                handle_restart(regs, NULL, 0);
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
@@ -1193,9 +1179,6 @@ void do_notify_resume(struct pt_regs *regs)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
index d7deb7fc7eb5c667483d4604d7537cd95f2f59a6..707f0573ec6bfade21e43c7a91bd0b3896ed8d79 100644 (file)
@@ -85,7 +85,7 @@ void __init time_init(void)
        mach_sched_init(timer_interrupt);
 }
 
-#ifdef CONFIG_M68KCLASSIC
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 u32 arch_gettimeoffset(void)
 {
@@ -108,4 +108,4 @@ static int __init rtc_init(void)
 
 module_init(rtc_init);
 
-#endif /* CONFIG_M68KCLASSIC */
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
index 5664386338da851094d48191ac1f717aa5dfff69..5e97f2ee7c1197feaf1fb79ec07cd10566466026 100644 (file)
@@ -103,80 +103,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
 }
 EXPORT_SYMBOL(__generic_copy_to_user);
 
-/*
- * Copy a null terminated string from userspace.
- */
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-       long res;
-       char c;
-
-       if (count <= 0)
-               return count;
-
-       asm volatile ("\n"
-               "1:     "MOVES".b       (%2)+,%4\n"
-               "       move.b  %4,(%1)+\n"
-               "       jeq     2f\n"
-               "       subq.l  #1,%3\n"
-               "       jne     1b\n"
-               "2:     sub.l   %3,%0\n"
-               "3:\n"
-               "       .section .fixup,\"ax\"\n"
-               "       .even\n"
-               "10:    move.l  %5,%0\n"
-               "       jra     3b\n"
-               "       .previous\n"
-               "\n"
-               "       .section __ex_table,\"a\"\n"
-               "       .align  4\n"
-               "       .long   1b,10b\n"
-               "       .previous"
-               : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
-               : "i" (-EFAULT), "0" (count));
-
-       return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-long strnlen_user(const char __user *src, long n)
-{
-       char c;
-       long res;
-
-       asm volatile ("\n"
-               "1:     subq.l  #1,%1\n"
-               "       jmi     3f\n"
-               "2:     "MOVES".b       (%0)+,%2\n"
-               "       tst.b   %2\n"
-               "       jne     1b\n"
-               "       jra     4f\n"
-               "\n"
-               "3:     addq.l  #1,%0\n"
-               "4:     sub.l   %4,%0\n"
-               "5:\n"
-               "       .section .fixup,\"ax\"\n"
-               "       .even\n"
-               "20:    sub.l   %0,%0\n"
-               "       jra     5b\n"
-               "       .previous\n"
-               "\n"
-               "       .section __ex_table,\"a\"\n"
-               "       .align  4\n"
-               "       .long   2b,20b\n"
-               "       .previous\n"
-               : "=&a" (res), "+d" (n), "=&d" (c)
-               : "0" (src), "r" (src));
-
-       return res;
-}
-EXPORT_SYMBOL(strnlen_user);
-
 /*
  * Zero Userspace
  */
index c801c172b822354017151cb040e348db207f73b9..f4dc9b29560940485c28208bbcba493d0271202a 100644 (file)
@@ -53,6 +53,7 @@
 #endif
 
 static u32 m68328_tick_cnt;
+static irq_handler_t timer_interrupt;
 
 /***************************************************************************/
 
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
        TSTAT &= 0;
 
        m68328_tick_cnt += TICKS_PER_JIFFY;
-       return arch_timer_interrupt(irq, dummy);
+       return timer_interrupt(irq, dummy);
 }
 
 /***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
 
 /***************************************************************************/
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
        /* disable timer 1 */
        TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
        /* Enable timer 1 */
        TCTL |= TCTL_TEN;
        clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
+       timer_interrupt = handler;
 }
 
 /***************************************************************************/
index 255fc03913e9065cf3cb9474b70cc8abf002f165..9877cefad1e7640dd27622410c04f882acdb37e3 100644 (file)
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
 #define OSCILLATOR  (unsigned long int)33000000
 #endif
 
+static irq_handler_t timer_interrupt;
 unsigned long int system_clock;
 
 extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
   pquicc->timer_ter1 = 0x0002; /* clear timer event */
 
-  return arch_timer_interrupt(irq, dummy);
+  return timer_interrupt(irq, dummy);
 }
 
 static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
        .handler = hw_tick,
 };
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
   unsigned char prescaler;
   unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
 
   pquicc->timer_ter1 = 0x0003; /* clear timer events */
 
+  timer_interrupt = handler;
+
   /* enable timer 1 interrupt in CIMR */
   setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
 
index 9f1260c5e2ad5d031b42b54447f141f247f808f4..44da406897e58b9b579d09cf4f69faeb7894725b 100644 (file)
@@ -42,4 +42,11 @@ unsigned long clk_get_rate(struct clk *clk)
        return MCF_CLK;
 }
 EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+       return NULL;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
 /***************************************************************************/
index 1a8ab6a5c03fc3ab399cd8fa75003b4220e69cd5..6c610234ffab96c8d849dfc8dac498bfaf1c9919 100644 (file)
@@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif
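set_restore_sigmask() now only warns if TIF_SIGPENDING is not already set, and the header grows clear/test/test_and_clear helpers so generic code can manage TS_RESTORE_SIGMASK without poking the flag word directly. Roughly, the arch-independent caller consumes them as in the sketch below -- a paraphrase of the generic restore_saved_sigmask() logic, not a copy of this tree's kernel/signal.c.

    #include <linux/sched.h>
    #include <linux/signal.h>

    /*
     * Paraphrase of how the generic code uses the helpers added above:
     * restore current->saved_sigmask only if the flag was set, clearing
     * it in the same step.
     */
    static inline void restore_saved_sigmask_sketch(void)
    {
        if (test_and_clear_restore_sigmask())
            set_current_blocked(&current->saved_sigmask);
    }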
 
index 7f4c7bef1642e0107afb3ffc7bbb02ce87b74585..76b9722557db77e99a3500154a34b8ed6556d958 100644 (file)
@@ -41,8 +41,6 @@
 #include <asm/cacheflush.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                struct pt_regs *regs)
@@ -106,7 +104,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
@@ -310,10 +307,11 @@ do_restart:
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-               siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+               siginfo_t *info, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -323,11 +321,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
                ret = setup_rt_frame(sig, ka, NULL, oldset, regs);
 
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -344,33 +340,18 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 #ifdef DEBUG_SIG
        printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
        printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
                        regs->r12, current_thread_info()->flags);
 #endif
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal. */
                if (in_syscall)
                        handle_restart(regs, &ka, 1);
-               if (!handle_signal(signr, &ka, &info, oldset, regs)) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &=
-                           ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
@@ -381,10 +362,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, int in_syscall)
@@ -401,9 +379,6 @@ void do_notify_resume(struct pt_regs *regs, int in_syscall)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs, in_syscall);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
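The microblaze conversion above follows the pattern this signal rework pushes into every architecture: handle_signal() fetches the mask to save via sigmask_to_save(), and on success reports completion through signal_delivered(), which both blocks the signal and drops the restore-sigmask flag; the manual sigdelsetmask()/_BLOCKABLE dance and the explicit flag clearing go away. A condensed, architecture-neutral skeleton of that flow, with setup_frame() standing in for whichever frame builder an arch really uses:

    #include <linux/sched.h>
    #include <linux/signal.h>

    /* Placeholder for the arch's real signal-frame builder. */
    static int setup_frame(int sig, struct k_sigaction *ka,
                           sigset_t *oldset, struct pt_regs *regs);

    static void handle_signal_sketch(int sig, siginfo_t *info,
                                     struct k_sigaction *ka,
                                     struct pt_regs *regs)
    {
        sigset_t *oldset = sigmask_to_save();

        if (setup_frame(sig, ka, oldset, regs))
            return;                         /* frame setup failed */

        signal_delivered(sig, info, ka, regs, 0);
    }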
index 09ab87ee6fef654eef0220ab05b1224492947efc..b3e10fdd389866659212f947971af764b98dcf3f 100644 (file)
@@ -288,6 +288,7 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS64_R1
+       select SYS_HAS_CPU_MIPS64_R2
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_HAS_EARLY_PRINTK
@@ -1423,6 +1424,7 @@ config CPU_SB1
 config CPU_CAVIUM_OCTEON
        bool "Cavium Octeon processor"
        depends on SYS_HAS_CPU_CAVIUM_OCTEON
+       select ARCH_SPARSEMEM_ENABLE
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_SMP
index 7dde01642d6b93527b5a6e1cc6af7d69d429ff34..bf2248474fa8da6c4516c0567803c781636cb385 100644 (file)
@@ -213,8 +213,6 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1200_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1200_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -235,7 +233,6 @@ struct platform_nand_data db1200_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1200_nand_parts),
                .partitions     = db1200_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1200_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1200_nand_device_ready,
index 0893f2af0d01d543d0a5cc1add7d5179bcef7cca..c56e0246694ecdffbc4afda86374db50d8bc5cb8 100644 (file)
@@ -145,8 +145,6 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1300_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1300_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -167,7 +165,6 @@ struct platform_nand_data db1300_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1300_nand_parts),
                .partitions     = db1300_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1300_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1300_nand_device_ready,
index 6815d0783cd8c195c8afdcdc47ab40c1753248e1..9eb79062f46e449f08a799b5f7c2d91a8c7696bc 100644 (file)
@@ -149,8 +149,6 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1550_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1550_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -171,7 +169,6 @@ struct platform_nand_data db1550_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1550_nand_parts),
                .partitions     = db1550_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1550_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1550_nand_device_ready,
index 6210b8d841098c5182a7ea986e6ac66c6a02371b..b311be45a7207028a7cf6df754a8b54c0f259938 100644 (file)
@@ -21,6 +21,7 @@ config BCM47XX_BCMA
        select BCMA
        select BCMA_HOST_SOC
        select BCMA_DRIVER_MIPS
+       select BCMA_HOST_PCI if PCI
        select BCMA_DRIVER_PCI_HOSTMODE if PCI
        default y
        help
index de4d917fd54d5ae99fa3ec69e55666a4fe3945cc..a551bab5ecb94b2aafce31787cea8e6bb31dbd10 100644 (file)
@@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
        return ret;
 }
 
-static const __initdata struct {
+static const struct {
        unsigned int    cs;
        unsigned int    base;
        unsigned int    size;
-} pcmcia_cs[3] = {
+} pcmcia_cs[3] __initconst = {
        {
                .cs     = MPI_CS_PCMCIA_COMMON,
                .base   = BCM_PCMCIA_COMMON_BASE_PA,
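This hunk, like several later ones in the merge, retags constant init-only tables from __initdata to __initconst; on newer toolchains a const object annotated __initdata typically triggers a "section type conflict", since .init.data is not a read-only section, whereas __initconst places the object in .init.rodata. Note also that the attribute moves after the declarator. A minimal example of the intended form:

    #include <linux/init.h>

    /* Constant data used only during init goes in .init.rodata. */
    static const int example_table[] __initconst = { 1, 2, 3 };

    static int __init example_init(void)
    {
        return example_table[0];    /* only referenced from __init code */
    }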
index f9e275a50d982be23ffda94a1e23d019c25569b5..2f4f6d5e05b66bdad381da0ddaff5b83d3897d51 100644 (file)
@@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
        help
          Lock the kernel's implementation of memcpy() into L2.
 
-config ARCH_SPARSEMEM_ENABLE
-       def_bool y
-       select SPARSEMEM_STATIC
-
 config IOMMU_HELPER
        bool
 
index 4b93048044eb266457b4c9ac80a164c056e6eba9..ee1fb9f7f517c29e1ce6781699cbf90e75a314ce 100644 (file)
@@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
        octeon_init_cvmcount();
 
        octeon_irq_setup_secondary();
-       raw_local_irq_enable();
 }
 
 /**
@@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
 
        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+       local_irq_enable();
 }
 
 /**
index 2e1ad4c652b72cf9042524870a19631f9e0566e0..82ad35ce2b45d975705e90a90db8cf82b7189255 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
-#include <asm/bug.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #include <asm/cpu-features.h>
 #include <asm/sgidefs.h>
index 285a41fa0b18dd0412a291805781f8d811ee82de..eee10dc07ac100defc6b0f00f6d8616da692e2c0 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
 
+#include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/war.h>
 
index f9fa2a479dd0e175ae7fc839d833b15695103f04..95e40c1e8ed114a3a95ad9cab9794e129ccbdca2 100644 (file)
@@ -94,6 +94,7 @@
 #define PRID_IMP_24KE          0x9600
 #define PRID_IMP_74K           0x9700
 #define PRID_IMP_1004K         0x9900
+#define PRID_IMP_M14KC         0x9c00
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -260,12 +261,12 @@ enum cpu_type_enum {
         */
        CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
        CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
-       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
+       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
 
        /*
         * MIPS64 class processors
         */
-       CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+       CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
        CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
        CPU_XLR, CPU_XLP,
 
@@ -288,7 +289,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M64R2     0x00000100
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
-       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
        MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
 
index 86548da650e765f79db345f4d3964a5d0eb6c0fe..991b659e254803937cc99c9854d2c24f1153b0d3 100644 (file)
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
-       (GIC_EIC_SHADOW_SET_BASE + (4 * intr))
+       (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
 
 #define GIC_VPE_EIC_VEC_BASE           0x0800
 #define GIC_VPE_EIC_VEC(intr) \
@@ -330,6 +330,17 @@ struct gic_intr_map {
 #define GIC_FLAG_TRANSPARENT   0x02
 };
 
+/*
+ * This is only used in EIC mode. This helps to figure out which
+ * shared interrupts we need to process when we get a vector interrupt.
+ */
+#define GIC_MAX_SHARED_INTR  0x5
+struct gic_shared_intr_map {
+       unsigned int num_shared_intr;
+       unsigned int intr_list[GIC_MAX_SHARED_INTR];
+       unsigned int local_intr_mask;
+};
+
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
@@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern void gic_bind_eic_interrupt(int irq, int set);
+extern unsigned int gic_get_timer_pending(void);
 
 #endif /* _ASM_GICREGS_H */
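The new gic_shared_intr_map ties an EIC vector to the list of shared GIC interrupts that can raise it, plus a mask of local interrupts, so the vector handler knows what to scan. Purely as an illustration of how a consumer might walk it -- the dispatch function name is hypothetical, not this tree's Malta code:

    /*
     * Hypothetical dispatch loop over one entry of the per-vector map
     * declared above; handle_shared_intr() is an illustrative name.
     */
    extern void handle_shared_intr(unsigned int intr);     /* hypothetical */

    static void dispatch_eic_vector(struct gic_shared_intr_map *map)
    {
        unsigned int i;

        for (i = 0; i < map->num_shared_intr; i++)
            handle_shared_intr(map->intr_list[i]);
    }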
index 7ebfc392e58d1d5cb7c8f2a686e25a9041f853b8..ab84064283db2c50d847d888b32e42a4d316dd7d 100644 (file)
@@ -251,7 +251,7 @@ struct f_format {   /* FPU register format */
        unsigned int func : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int opcode : 6;
        unsigned int fr : 5;
        unsigned int ft : 5;
@@ -324,7 +324,7 @@ struct f_format {   /* FPU register format */
        unsigned int opcode : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int fmt : 2;
        unsigned int func : 4;
        unsigned int fd : 5;
index a58f22998a86507729e3d72d379ef6e23ddad019..29d9c23c20c72d87f847f28772666448cdd2711a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/bug.h>
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
index fb698dc09bc9c39c60cf7c0d43992b424e239551..78dbb8a86da249d4bd4e6fa2db6b6c6b0c4bd4ed 100644 (file)
@@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
  * IE7.  Since R2 their number has to be read from the c0_intctl register.
  */
 #define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
 
 extern int cp0_compare_irq;
 extern int cp0_compare_irq_shift;
index 94d4faad29a1a4286c934d28e76175ca97eee935..fdcd78ca1b03d054f807ee0c64839c763194f855 100644 (file)
@@ -99,7 +99,7 @@
 #define CKCTL_6368_USBH_CLK_EN         (1 << 15)
 #define CKCTL_6368_DISABLE_GLESS_EN    (1 << 16)
 #define CKCTL_6368_NAND_CLK_EN         (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN                (1 << 17)
+#define CKCTL_6368_IPSEC_CLK_EN                (1 << 18)
 
 #define CKCTL_6368_ALL_SAFE_EN         (CKCTL_6368_SWPKT_USB_EN |      \
                                        CKCTL_6368_SWPKT_SAR_EN |       \
index d11aa02a956a57ca41ff890dbe16bbdd0145db53..5447d9fc421941da85a2b0ce6bed2b865a4a8282 100644 (file)
 #define GIC_CPU_INT4           4 /* .                  */
 #define GIC_CPU_INT5           5 /* Core Interrupt 5   */
 
+/* MALTA GIC local interrupts */
+#define GIC_INT_TMR             (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR         (GIC_CPU_INT5)
+
+/* GIC constants */
+/* Add 2 to convert non-eic hw int # to eic vector # */
+#define GIC_CPU_TO_VEC_OFFSET   (2)
+/* If we map an intr to pin X, GIC will actually generate vector X+1 */
+#define GIC_PIN_TO_VEC_OFFSET   (1)
+
 #define GIC_EXT_INTR(x)                x
 
 /* External Interrupts used for IPI */
index c9420aa97e3232dc8b96ccf2c960e6d0aa8f1f00..e71ff4c317f2d0bdd8df430b0f439286abb4559f 100644 (file)
@@ -48,7 +48,7 @@
 #define CP0_VPECONF0           $1, 2
 #define CP0_VPECONF1           $1, 3
 #define CP0_YQMASK             $1, 4
-#define CP0_VPESCHEDULE        $1, 5
+#define CP0_VPESCHEDULE                $1, 5
 #define CP0_VPESCHEFBK         $1, 6
 #define CP0_TCSTATUS           $2, 1
 #define CP0_TCBIND             $2, 2
index e0308dcca1358f6f2db6161486299de11d61dde5..fa03ec3fbf897a4c3271d8a38025f383c82760cc 100644 (file)
  * assume GCC is being used.
  */
 
-#if (_MIPS_SZLONG == 64)
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-#endif
-
 typedef long           __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
index 6e00f751ab6dc675b886d736e1f0c814136cfbd4..fe9a4c3ec5a1f2d9f557adf7c32348b0c892e374 100644 (file)
@@ -20,7 +20,7 @@ struct stat {
        long            st_pad1[3];             /* Reserved for network id */
        ino_t           st_ino;
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       __u32           st_nlink;
        uid_t           st_uid;
        gid_t           st_gid;
        unsigned        st_rdev;
@@ -55,7 +55,7 @@ struct stat64 {
        unsigned long long      st_ino;
 
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       __u32           st_nlink;
 
        uid_t           st_uid;
        gid_t           st_gid;
@@ -96,7 +96,7 @@ struct stat {
        unsigned long           st_ino;
 
        mode_t                  st_mode;
-       nlink_t                 st_nlink;
+       __u32                   st_nlink;
 
        uid_t                   st_uid;
        gid_t                   st_gid;
index 5d33621b5658f9c46314f4fcc1d2d123eaaf83d5..4f8ddba8c360050703d6d760b5909e92944e8b23 100644 (file)
@@ -22,7 +22,7 @@ struct task_struct;
  * switch_to(n) should switch tasks to task nr n, first
  * checking that n isn't the current task, in which case it does nothing.
  */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -66,11 +66,13 @@ do {                                                                        \
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       u32 __usedfpu;                                                  \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        __clear_software_ll_bit();                                      \
-       (last) = resume(prev, next, task_thread_info(next));            \
+       __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
+       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
 } while (0)
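The TIF_USEDFPU test-and-clear moves from the assembly in resume() into switch_to(): C code atomically clears the flag on the outgoing task and hands the old value to resume() as a fourth argument, so the assembler only decides whether to save FPU state and clear CU1 instead of doing its own non-atomic read-modify-write of TI_FLAGS. A small stand-alone model of that "clear and report the old value" step (bit position and names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TIF_USEDFPU (1u << 5)   /* illustrative bit position */

    /* Model of test_and_clear_tsk_thread_flag(): atomically clear the bit
     * and report whether it had been set, so the caller can pass the old
     * value on to a resume()-like routine. */
    static bool test_and_clear_flag(atomic_uint *flags, unsigned int bit)
    {
        return atomic_fetch_and(flags, ~bit) & bit;
    }

    int main(void)
    {
        atomic_uint ti_flags = TIF_USEDFPU;
        bool usedfpu = test_and_clear_flag(&ti_flags, TIF_USEDFPU);

        printf("usedfpu=%d, flags now %#x\n",
               (int)usedfpu, atomic_load(&ti_flags));
        return 0;
    }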
 
 #define finish_arch_switch(prev)                                       \
index e2eca7d1059800e22704b467ab4f94f955df27c0..ca97e0ecb64b5054195b7d271017fa2760b7d05a 100644 (file)
@@ -60,6 +60,8 @@ struct thread_info {
 register struct thread_info *__current_thread_info __asm__("$28");
 #define current_thread_info()  __current_thread_info
 
+#endif /* !__ASSEMBLY__ */
+
 /* thread information allocation */
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
 #define THREAD_SIZE_ORDER (1)
@@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 
 #define STACK_WARN     (THREAD_SIZE / 8)
 
-#endif /* !__ASSEMBLY__ */
-
 #define PREEMPT_ACTIVE         0x10000000
 
 /*
index 6ae7ce4ac63eb9b6a65ecf8ae0a49093579ae597..f4630e1082ab676a96e052e504995e86716be012 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) xxxx  the Anonymous
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004  Maciej W. Rozycki
- * Copyright (C) 2001, 2004  MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -199,6 +199,7 @@ void __init check_wait(void)
                cpu_wait = rm7k_wait_irqoff;
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
@@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_5KC;
                __cpu_name[cpu] = "MIPS 5Kc";
                break;
+       case PRID_IMP_5KE:
+               c->cputype = CPU_5KE;
+               __cpu_name[cpu] = "MIPS 5KE";
+               break;
        case PRID_IMP_20KC:
                c->cputype = CPU_20KC;
                __cpu_name[cpu] = "MIPS 20Kc";
@@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_74K;
                __cpu_name[cpu] = "MIPS 74Kc";
                break;
+       case PRID_IMP_M14KC:
+               c->cputype = CPU_M14KC;
+               __cpu_name[cpu] = "MIPS M14Kc";
+               break;
        case PRID_IMP_1004K:
                c->cputype = CPU_1004K;
                __cpu_name[cpu] = "MIPS 1004Kc";
index 57ba13edb03af10da20a9ce936645902d9f562c1..3fc1691110dc52f82e8ec1271054c1411c122658 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
  * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
  */
 #include <linux/interrupt.h>
@@ -34,6 +34,12 @@ EXPORT_SYMBOL(memmove);
 
 EXPORT_SYMBOL(kernel_thread);
 
+/*
+ * Functions that operate on entire pages.  Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
 /*
  * Userspace access stuff.
  */
index ce89c806170846a1ea2bbcf027598fae11028648..0441f54b2a6acc9ab27a0d229c1465a8e0fa5975 100644 (file)
@@ -31,7 +31,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  7
        LEAF(resume)
index f29099b104c497e5cb4b4a3d368a801fc6c60d05..eb5e394a4650bbe9f0814e51bc00e1b8523c57f1 100644 (file)
@@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
        return counters >> vpe_shift();
 }
 
-static unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
-       return counters << vpe_shift();
-}
-
 #else /* !CONFIG_MIPS_MT_SMP */
 #define vpe_id()       0
 
index 293898391e674bd0979f87918a019b3a377d575d..9c51be5a163a9249efa3bd74f12be62b64eba923 100644 (file)
@@ -43,7 +43,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -51,18 +51,9 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       lw      t3, TASK_THREAD_INFO(a0)
-       lw      t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
+       beqz    a3, 1f
 
-       and     t0, t0, t1
-       sw      t0, TI_FLAGS(t3)
+       PTR_L   t3, TASK_THREAD_INFO(a0)
 
        /*
         * clear saved user stack CU1 bit
index 9414f935446968069ec4f22daed1ae19b0799fb7..42d2a3938420df28e64d1f016459529b13991b7e 100644 (file)
@@ -41,7 +41,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  5
        LEAF(resume)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
 
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       beqz    a3, 1f
 
+       PTR_L   t3, TASK_THREAD_INFO(a0)
        /*
         * clear saved user stack CU1 bit
         */
index 10263b405981f0eb4604f1d38bd3887e10f1c129..9c60d09e62a71521007c3cae0894cdcf09f01424 100644 (file)
@@ -19,8 +19,6 @@
 #  define DEBUGP(fmt, args...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Determine which stack to use..
  */
index 17f6ee30ad0d604da53fca7da7569bf4006d3d35..f2c09cfc60ac338dc9300f3487bae83e48a8cbd1 100644 (file)
@@ -339,7 +339,6 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        sig = restore_sigcontext(&regs, &frame->sf_sc);
@@ -375,7 +374,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
@@ -514,9 +512,10 @@ struct mips_abi mips_abi = {
        .restart        = __NR_restart_syscall
 };
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-       struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+       struct k_sigaction *ka, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
        void *vdso = current->mm->context.vdso;
@@ -550,17 +549,14 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
                                       ka, regs, sig, oldset);
 
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -572,25 +568,10 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
@@ -614,10 +595,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -630,14 +608,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        local_irq_enable();
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index b4fe2eacbd5d258a55146f5d51154491119569e8..da1b56a39ac77815b4989183fd07821286468dfe 100644 (file)
@@ -465,7 +465,6 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        sig = restore_sigcontext32(&regs, &frame->sf_sc);
@@ -503,7 +502,6 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
index 63ffac9af7c5b61fa9fa966e641889f935b7d747..3574c145511be486b0feb82177ced3c80679d180 100644 (file)
@@ -109,7 +109,6 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
index 3046e2986006b448c884bbb7355a676cc63e6ad9..8e393b8443f7ae4aa8c4b8a99eb1f689e49957c1 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
@@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
 
        write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
 #endif
-
-       /* make sure there won't be a timer interrupt for a little while */
-       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
-
-       irq_enable_hazard();
-       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
-       irq_enable_hazard();
 }
 
 /*
@@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
 static void bmips_smp_finish(void)
 {
        pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+       /* make sure there won't be a timer interrupt for a little while */
+       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+       irq_enable_hazard();
+       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+       irq_enable_hazard();
 }
 
 /*
index 48650c8180401aeb1ce1c3f2713471d9c6e45536..1268392f1d2786bc3abb91fdb9a88ad84fa85ec9 100644 (file)
@@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
 
        notify_cpu_starting(cpu);
 
-       mp_ops->smp_finish();
+       set_cpu_online(cpu, true);
+
        set_cpu_sibling_map(cpu);
 
        cpu_set(cpu, cpu_callin_map);
 
        synchronise_count_slave();
 
+       /*
+        * irq will be enabled in ->smp_finish(), enabling it too early
+        * is dangerous.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+       mp_ops->smp_finish();
+
        cpu_idle();
 }
 
@@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);
 
-       set_cpu_online(cpu, true);
-
        return 0;
 }
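Several platform hunks in this merge (Octeon, BMIPS, SMTC, Yosemite) move the final interrupt enable out of ->init_secondary() and into ->smp_finish(), and the generic start_secondary() above now marks the CPU online itself and warns if anything turned interrupts on early. Condensed into one sketch -- not the generic code verbatim -- the intended ordering on a secondary CPU is:

    #include <linux/bug.h>
    #include <linux/irqflags.h>
    #include <asm/mipsregs.h>
    #include <asm/time.h>

    /*
     * Stay IRQ-off through per-CPU init, set_cpu_online() and the
     * sibling map; only the platform's smp_finish() programs the first
     * timer tick and enables interrupts, just before cpu_idle().
     */
    static void secondary_bringup_sketch(void)
    {
        /* per-CPU init, calibration, set_cpu_online(), sibling map ... */

        WARN_ON_ONCE(!irqs_disabled());     /* nobody may enable early */

        /* platform ->smp_finish(): */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
        local_irq_enable();

        /* ... then cpu_idle() */
    }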
 
index f5dd38f1d0152b49b13c971c59fde0cd218cb93e..15b5f3cfd20c48b9424dd5364d29cf1aeff77d4a 100644 (file)
@@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 
 /*
  * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
+ * Make sure all CPUs are in a sensible state before we boot any of the
  * secondaries.
  *
  * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
@@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        /*
         * TCContext gets an offset from the base of the IPIQ array
         * to be used in low-level code to detect the presence of
-        * an active IPI queue
+        * an active IPI queue.
         */
        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
        /* Bind tc to vpe */
        write_tc_c0_tcbind(vpe);
-       /* In general, all TCs should have the same cpu_data indications */
+       /* In general, all TCs should have the same cpu_data indications. */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
        if (cpu_data[0].cputype == CPU_34K ||
@@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 }
 
 /*
- * Tweak to get Count registes in as close a sync as possible.
- * Value seems good for 34K-class cores.
+ * Tweak to get Count registes in as close a sync as possible.  The
+ * value seems good for 34K-class cores.
  */
 
 #define CP0_SKEW 8
@@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-       local_irq_enable();
 }
 
 void smtc_smp_finish(void)
@@ -631,6 +630,8 @@ void smtc_smp_finish(void)
        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
 
+       local_irq_enable();
+
        printk("TC %d going on-line as CPU %d\n",
                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
index 99f913c8d7a6eb3e4db9ac2f7b8def493a4e5e5e..842d55e411fd396479b611ba0a1c0c2af2d4cb92 100644 (file)
@@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
 void __cpuinit synchronise_count_slave(void)
 {
        int i;
-       unsigned long flags;
        unsigned int initcount;
        int ncpus;
 
@@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
        return;
 #endif
 
-       local_irq_save(flags);
-
        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
@@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
-
-       local_irq_restore(flags);
 }
 #undef NR_LOOPS
index 2d0c2a277f525b5b89b89500a5153c9032ed4ce3..c3c29354370345ae74c25dce1e4e7e7da9263316 100644 (file)
@@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;
 
+       if (!task)
+               task = current;
+
        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
@@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
                break;
 
        case CPU_5KC:
+       case CPU_5KE:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
  * Timer interrupt
  */
 int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
 int cp0_compare_irq_shift;
 
 /*
@@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
                        cp0_perfcount_irq = -1;
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
-               cp0_compare_irq_shift = cp0_compare_irq;
+               cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
                cp0_perfcount_irq = -1;
        }
 
index 924da5eb7031498ea93dc050a7492aa78f5bb0ec..df243a64f4305305564ec1e1e5b7f68c4c6e55ad 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -72,7 +73,7 @@ SECTIONS
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
-               INIT_TASK_DATA(PAGE_SIZE)
+               INIT_TASK_DATA(THREAD_SIZE)
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
                READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
index 4aa20280613ea3954db6e7925cc2fead924b0b78..fd6203f14f1fbfb8e608606235f77f3dd3113272 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
-                                  gup.o init.o mmap.o page.o tlbex.o \
-                                  tlbex-fault.o uasm.o
+                                  gup.o init.o mmap.o page.o page-funcs.o \
+                                  tlbex.o tlbex-fault.o uasm.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
index 5109be96d98d099ec8509dd1de6a9048f5c4b82d..f092c265dc6360a89403ac62c184b7f4ab417437 100644 (file)
@@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
-               c->icache.sets = 64 << ((config1 >> 22) & 7);
+               c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
 
                icache_size = c->icache.sets *
@@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
-               c->dcache.sets = 64 << ((config1 >> 13) & 7);
+               c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
 
                dcache_size = c->dcache.sets *
@@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
        case CPU_R14000:
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
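The two probe_pcache() hunks above change how the Config1 IS/DS "sets per way" fields are decoded. In the MIPS32/64 encoding (taken from the architecture manuals, so treat it as an assumption rather than something this diff states) a field value of 7 wraps around to mean 32 sets; the old `64 << field` formula turned that into 8192, while `32 << ((field + 1) & 7)` matches for 0-6 and gives the correct 32 for 7. A quick stand-alone check:

    #include <stdio.h>

    /* Old and new decodings of the Config1 IS/DS field (sets per way). */
    static unsigned int sets_old(unsigned int field)
    {
        return 64u << field;
    }

    static unsigned int sets_new(unsigned int field)
    {
        return 32u << ((field + 1) & 7);
    }

    int main(void)
    {
        for (unsigned int f = 0; f <= 7; f++)
            printf("field %u: old %5u  new %5u\n",
                   f, sets_old(f), sets_new(f));
        /* fields 0..6 agree (64..4096); field 7: old 8192, new 32 */
        return 0;
    }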
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644 (file)
index 0000000..48a6b38
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012  Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name   clear_page_cpu
+#define cpu_copy_page_function_name    copy_page_cpu
+#else
+#define cpu_clear_page_function_name   clear_page
+#define cpu_copy_page_function_name    copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x058 bytes
+ * R4600 v1.7:                         0x05c bytes
+ * R4600 v2.0:                         0x060 bytes
+ * With prefetching, 16 word strides   0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x11c bytes
+ * R4600 v1.7:                         0x080 bytes
+ * R4600 v2.0:                         0x07c bytes
+ * With prefetching, 16 word strides   0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
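The new page-funcs.S reserves fixed-size text for the run-time generated clear_page()/copy_page() bodies -- .space 288 (0x120) and 1344 (0x540), matching the "maximum sizes" comments -- bracketed by __clear_page_start/__clear_page_end and __copy_page_start/__copy_page_end so page.c (next hunks) can emit into them and range-check the result; the EXPORT_SYMBOLs move to mips_ksyms.c because the static arrays that used to carry the aliases are gone. A stand-alone model of that "fill a bracketed region, then bound-check" pattern, with made-up region names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a region bracketed by asm/linker symbols, as
     * __clear_page_start/__clear_page_end bracket the reserved .space. */
    static uint32_t region[288 / 4];
    #define region_start (&region[0])
    #define region_end   (&region[288 / 4])

    int main(void)
    {
        uint32_t *buf = region_start;

        /* "synthesize" a couple of instructions into the region */
        *buf++ = 0x03e00008;    /* jr ra (example encoding) */
        *buf++ = 0x00000000;    /* nop */

        assert(!(buf > region_end));    /* mirrors the BUG_ON() bound check */
        printf("emitted %td words\n", buf - region_start);
        return 0;
    }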
index cc0b626858b3d0b6e34e5e510004083ce7bfba24..98f530e182163146ee4bba570edc2c3013236f31 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007  Maciej W. Rozycki
  * Copyright (C) 2008  Thiemo Seufer
+ * Copyright (C) 2012  MIPS Technologies, Inc.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x058 bytes
- * R4600 v1.7:                         0x05c bytes
- * R4600 v2.0:                         0x060 bytes
- * With prefetching, 16 word strides   0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x11c bytes
- * R4600 v1.7:                         0x080 bytes
- * R4600 v2.0:                         0x07c bytes
- * With prefetching, 16 word strides   0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
 static int pref_bias_clear_store __cpuinitdata;
 static int pref_bias_copy_load __cpuinitdata;
 static int pref_bias_copy_store __cpuinitdata;
@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
                }
 }
 
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
 void __cpuinit build_clear_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&clear_page_array;
+       u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+       BUG_ON(buf > &__clear_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized clear page handler (%u instructions).\n",
-                (u32)(buf - clear_page_array));
+                (u32)(buf - &__clear_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - clear_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+       for (i = 0; i < (buf - &__clear_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 void __cpuinit build_copy_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&copy_page_array;
+       u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+       BUG_ON(buf > &__copy_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized copy page handler (%u instructions).\n",
-                (u32)(buf - copy_page_array));
+                (u32)(buf - &__copy_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - copy_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+       for (i = 0; i < (buf - &__copy_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
 
 /*
  * Pad descriptors to cacheline, since each is exclusively owned by a
index 0bc485b3cd606de49e62aff00c9fe611e3a90540..03eb0ef9158047b023ee87d8d5d0e45c67c1583f 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011  MIPS Technologies, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
+       case CPU_M14KC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
index bf80921f2f56c0b145e698fa2c673ff49c7c0636..284dea54faf5a9f629e9d2cab18121cc5db328c6 100644 (file)
@@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
                return;
        }
 
-       if (controller->io_resource->start < 0x00001000UL)      /* FIXME */
-               controller->io_resource->start = 0x00001000UL;
+       /* Change start address to avoid conflicts with ACPI and SMB devices */
+       if (controller->io_resource->start < 0x00002000UL)
+               controller->io_resource->start = 0x00002000UL;
 
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
@@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
 }
 
 /* Enable PCI 2.1 compatibility in PIIX4 */
-static void __init quirk_dlcsetup(struct pci_dev *dev)
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
 {
        u8 odlc, ndlc;
        (void) pci_read_config_byte(dev, 0x82, &odlc);
index b7f37d4982fab27b91213a83be7a178626b694cb..2e28f653f66d1b582b9d0ca89c389aaaa1b18ca8 100644 (file)
@@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
        unsigned int __iomem *jmpr_p =
                (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
        int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
-       static const int pciclocks[] __initdata = {
+       static const int pciclocks[] __initconst = {
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
index acb677a1227cc043a364b8938e78d7bd911c64d3..b3df7c2aad1e144fe8be92584272b14aed53973f 100644 (file)
@@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
 
 void xlp_mmu_init(void)
 {
+       /* enable extended TLB and Large Fixed TLB */
        write_c0_config6(read_c0_config6() | 0x24);
-       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+       /* set page mask of Fixed TLB in config7 */
        write_c0_config7(PM_DEFAULT_MASK >>
                (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
 }
@@ -100,6 +102,10 @@ void __init prom_init(void)
        nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
 #ifdef CONFIG_SMP
        nlm_wakeup_secondary_cpus(0xffffffff);
+
+       /* update TLB size after waking up threads */
+       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
        register_smp_ops(&nlm_smp_ops);
 #endif
 }
index d1f2d4c52d42d3a0e1e92f5605d81632eefc8aa6..b6e378211a2c940021fb2e7befb342b90b1b1777 100644 (file)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        switch (current_cpu_type()) {
        case CPU_5KC:
+       case CPU_M14KC:
        case CPU_20KC:
        case CPU_24K:
        case CPU_25KF:
index baba3bcaa3c28100067a39d9e1a1132a5a8d02b4..4d80a856048d19261e3af077ff1ff5344b3b26d2 100644 (file)
@@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
 
        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
+       case CPU_M14KC:
+               op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+               break;
+
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;
index d5d4c018fb04c03f4ef6b1d26d05fd263ad4835e..0857ab8c3919750feb164b3649f60df502290312 100644 (file)
@@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
@@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, 0xe4, 1 << 5);
 }
 
-static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char c;
 
@@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: ISA bridge done\n");
 }
 
-static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
 {
        printk(KERN_INFO"via686b fix: IDE\n");
 
@@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: IDE done\n");
 }
 
-static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
 }
 
-static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
 }
 
-static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
        unsigned char c;
index 4b9768d5d72948b200d7431b4754fb7c21a27bc9..a7b917dcf604bde1ee359b20fb203356c0e4a84e 100644 (file)
@@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 }
 
 /* CS5536 SPEC. fixup */
-static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
 {
        /* the uart1 and uart2 interrupt in PIC is enabled as default */
        pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
        pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
 {
        /* setting the mutex pin as IDE function */
        pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
                               CS5536_IDE_FLASH_SIGNATURE);
 }
 
-static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
 {
        /* enable the AUDIO interrupt in PIC  */
        pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
@@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
 }
 
-static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
 {
        /* enable the OHCI interrupt in PIC */
        /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
        pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
 {
        u32 hi, lo;
 
@@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
 }
 
-static void __init loongson_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
index 0f48498bc231b074f1ec8b401e9660b29bf37e5c..70073c98ed320dc6f88f457028c8b9cd094e93ab 100644 (file)
@@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
-       static int piixirqmap[16] __initdata = {  /* PIIX PIRQC[A:D] irq mappings */
+       static int piixirqmap[16] __devinitdata = {  /* PIIX PIRQC[A:D] irq mappings */
                0,  0,  0,  3,
                4,  5,  6,  7,
                0,  9, 10, 11,
@@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
         malta_piix_func0_fixup);
 
-static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
 
index e08f49cb6875abd65d50b7cb1cee964f816d96b7..8e4f8288eca2e2fcfc17bd615a26a5e0a6a152de 100644 (file)
 
 #include <asm/vr41xx/mpc30x.h>
 
-static const int internal_func_irqs[] __initdata = {
+static const int internal_func_irqs[] __initconst = {
        VRC4173_CASCADE_IRQ,
        VRC4173_AC97_IRQ,
        VRC4173_USB_IRQ,
 };
 
-static const int irq_tab_mpc30x[] __initdata = {
+static const int irq_tab_mpc30x[] __initconst = {
  [12] = VRC4173_PCMCIA1_IRQ,
  [13] = VRC4173_PCMCIA2_IRQ,
  [29] = MQ200_IRQ,
index f0bb9146e6c038424281cd45bb5b3ebc9dab0815..d02900a72916869dc39a03d0dd2075a5f8e85d82 100644 (file)
@@ -15,7 +15,7 @@
  * Set the BCM1250, etc. PCI host bridge's TRDY timeout
  * to the finite max.
  */
-static void __init quirk_sb1250_pci(struct pci_dev *dev)
+static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x40, 0xff);
 }
@@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
 /*
  * The BCM1250, etc. PCI/HT bridge reports as a host bridge.
  */
-static void __init quirk_sb1250_ht(struct pci_dev *dev)
+static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
 {
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 }
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
 /*
  * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
  */
-static void __init quirk_sp1011(struct pci_dev *dev)
+static void __devinit quirk_sp1011(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x64, 0xff);
 }
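The long run of __init-to-__devinit retags in these PCI hunks (Malta, Loongson, SiByte, and the IP27/TX4927 files below) exists because header fixups run whenever a matching device is enumerated; with hotplug that can be long after .init.text has been freed, so a fixup left in init text risks jumping into released memory, while __devinit keeps it resident whenever hotplug support is built in. The declaration pattern, as a generic example rather than one of the quirks in this merge:

    #include <linux/init.h>
    #include <linux/pci.h>

    /* Generic shape of a hotplug-safe PCI header fixup; the PCI_ANY_ID
     * wildcards and the register poke are placeholders. */
    static void __devinit example_quirk(struct pci_dev *dev)
    {
        pci_write_config_byte(dev, 0x40, 0xff);
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, example_quirk);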
index a1e7e6d80c8c718e9b532e5ccb8ff0e7bac9e164..bc13e29d2bb34d6f9a257322e222b943da533c1c 100644 (file)
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
 }
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
-static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
 
index 0fbe4c0c170a25f5af0bd0da083a14c47e82e5f7..fdc24440294c7028c6fdf7e2a655b896dbe60ef7 100644 (file)
@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
        bridge->b_widget.w_tflush;      /* Flush */
 }
 
-static void __init pci_fixup_ioc3(struct pci_dev *d)
+static void __devinit pci_fixup_ioc3(struct pci_dev *d)
 {
        pci_disable_swapping(d);
 }
index ea453532a33c6dfc0eeb49659b2dc9036494713a..075d87acd12ac4158e090b7918250e958f0d8c05 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 
        /* setup reset gpio used by pci */
        reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-       if (reset_gpio > 0)
+       if (gpio_is_valid(reset_gpio))
                devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
        /* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
        /* toggle reset pin */
-       if (reset_gpio > 0) {
+       if (gpio_is_valid(reset_gpio)) {
                __gpio_set_value(reset_gpio, 0);
                wmb();
                mdelay(1);
index 1644805a6730db188bf9f26245c947551e8a4b5b..172af1cd58672e137924e88dae7f37dcf82393be 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
 #include <linux/console.h>
+#include <linux/pci_regs.h>
 
 #include <asm/io.h>
 
@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
        .io_offset      = 0x00000000UL,
 };
 
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+       struct pci_bus *bus, *p;
+
+       /* Find the bridge on bus 0 */
+       bus = dev->bus;
+       for (p = bus->parent; p && p->number != 0; p = p->parent)
+               bus = p;
+
+       return p ? bus->self : NULL;
+}
+
 static int get_irq_vector(const struct pci_dev *dev)
 {
+       struct pci_dev *lnk;
+
        if (!nlm_chip_is_xls())
-               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ*/
+               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ */
 
        /*
         * For XLS PCIe, there is an IRQ per Link, find out which
         * link the device is on to assign interrupts
-       */
-       if (dev->bus->self == NULL)
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
                return 0;
 
-       switch  (dev->bus->self->devfn) {
-       case 0x0:
+       switch  (PCI_SLOT(lnk->devfn)) {
+       case 0:
                return PIC_PCIE_LINK0_IRQ;
-       case 0x8:
+       case 1:
                return PIC_PCIE_LINK1_IRQ;
-       case 0x10:
+       case 2:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK2_IRQ;
                else
                        return PIC_PCIE_LINK2_IRQ;
-       case 0x18:
+       case 3:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK3_IRQ;
                else
                        return PIC_PCIE_LINK3_IRQ;
        }
-       WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+       WARN(1, "Unexpected devfn %d\n", lnk->devfn);
        return 0;
 }
 
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
        struct msi_msg msg;
+       struct pci_dev *lnk;
        int irq, ret;
+       u16 val;
+
+       /* MSI not supported on XLR */
+       if (!nlm_chip_is_xls())
+               return 1;
+
+       /*
+        * Enable MSI on the XLS PCIe controller bridge which was disabled
+        * at enumeration, the bridge MSI capability is at 0x50
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
+               return 1;
+
+       pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+       if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+               val |= PCI_MSI_FLAGS_ENABLE;
+               pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+       }
 
        irq = get_irq_vector(dev);
        if (irq <= 0)
@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
                }
        } else {
                /* XLR PCI controller ACK */
-               irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
+               irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
        }
 
        return 0;
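
PCI_SLOT() recovers the device (slot) number from devfn, which packs slot and function as (slot << 3) | function; the old raw comparisons against 0x0/0x8/0x10/0x18 therefore correspond to slots 0 through 3. A small illustration of the encoding (values chosen for the example, not taken from the patch):

	#include <linux/pci.h>

	static unsigned int example_slot_for_link2(void)
	{
		unsigned int devfn = PCI_DEVFN(2, 0);	/* the old 0x10 case */

		return PCI_SLOT(devfn);			/* == 2 */
	}
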
index b71fae231049dfa86d25f3e5fef2216d968faa17..5edab2bc6fc0c3314d5925d2197781a7f563b4df 100644 (file)
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  */
 static void __cpuinit yos_init_secondary(void)
 {
-       set_c0_status(ST0_CO | ST0_IE | ST0_IM);
 }
 
 static void __cpuinit yos_smp_finish(void)
 {
+       set_c0_status(ST0_CO | ST0_IM | ST0_IE);
 }
 
 /* Hook for after all CPUs are online */
index 87167dcc79fa24bf694a9e2d7bc69890a4eab2ef..05a1d922cd60871ef226cc792a448c0d2f295793 100644 (file)
@@ -244,11 +244,6 @@ static struct platform_device pnx833x_sata_device = {
        .resource      = pnx833x_sata_resources,
 };
 
-static const char *part_probes[] = {
-       "cmdlinepart",
-       NULL
-};
-
 static void
 pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
@@ -268,7 +263,6 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
        .chip = {
                .nr_chips               = 1,
                .chip_delay             = 25,
-               .part_probe_types       = part_probes,
        },
        .ctrl = {
                .cmd_ctrl               = pnx833x_flash_nand_cmd_ctrl
index 0a170e0ffeaae4ea84e0d5bfe1480203e69524be..7773f3d956b0cdc914ab82cb7c2fd79664c558f8 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CALLIOPE_ADDR(x)       (CALLIOPE_IO_BASE + (x))
 
-const struct register_map calliope_register_map __initdata = {
+const struct register_map calliope_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
        .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
        .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
index bbc0c122be5ee34c3ece4cb2fdee9ddf8efef384..da076db7b7ed2fd572d6429231b370e381a10e69 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
 
-const struct register_map cronus_register_map __initdata = {
+const struct register_map cronus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
index 91dda682752ce25ece90b800a9103bb33cd80c86..47683b370e748da98a21860ce04424e8f8459b36 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/init.h>
 #include <asm/mach-powertv/asic.h>
 
-const struct register_map gaia_register_map __initdata = {
+const struct register_map gaia_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
        .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
        .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
index 4a05bb096476a1ef7751f528c16d9ea9015827a9..6ff4b10f09dab4e4c3ba35242db83654315df0ab 100644 (file)
@@ -28,7 +28,7 @@
 
 #define ZEUS_ADDR(x)   (ZEUS_IO_BASE + (x))
 
-const struct register_map zeus_register_map __initdata = {
+const struct register_map zeus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
index ea774285e6c500b43ab0d915b3c292aa091eb307..716e9a12f0e77a0c85f492d3cf760ce41059690a 100644 (file)
@@ -293,7 +293,6 @@ static void __init rb532_nand_setup(void)
        rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
        rb532_nand_data.chip.partitions = rb532_partition_info;
        rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
-       rb532_nand_data.chip.options = NAND_NO_AUTOINCR;
 }
 
 
index 682efb0c108d22576992c9f554653e9329916d3b..64eb71b1528032ea4af4f084fbbe9cf6c8d91216 100644 (file)
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
        return err;
 }
 
-static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        int irq;        /* PCI/ISA Bridge interrupt */
        u8 reg_64;
index ab506181ec3108ad98c2db77d73f01e2e2b9b134..d31eeea480cfdda8a4231351abbf61c76da08a5f 100644 (file)
@@ -20,9 +20,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 55b79ef10028cbc3253685acbf0979edd6784d08..44251b974f1d96d4931e4704d9433163eb070962 100644 (file)
@@ -81,9 +81,6 @@ struct pt_regs {
 #define PTRACE_GETFPREGS          14
 #define PTRACE_SETFPREGS          15
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD     0x00000001
-
 #ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
index 08251d6f6b11015b5d8d265cba4fe88fc70e0f1c..ac519bbd42ffe32693a02d3f88ac5dbd21d426ff 100644 (file)
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index bd4e90dfe6c26d37c366abec8275a1d0d1e7b774..f8e66425cbf826f2f62aef4756869442a9ed5fdc 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
-#include <asm/hardirq.h>
 #include <unit/timex.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
 extern int init_clockevents(void);
 extern int init_clocksource(void);
 
-static inline void setup_jiffies_interrupt(int irq,
-                                          struct irqaction *action)
-{
-       u16 tmp;
-       setup_irq(irq, action);
-       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
-       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       tmp = GxICR(irq);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
index 69cae0260786207d0c3b62fde6255f6b2f442995..ccce35e3e179150c87f5f94af2a2463333236a68 100644 (file)
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
 {
 }
 
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 int __init init_clockevents(void)
 {
        struct clock_event_device *cd;
index a5ac755dd69f4acbc8c6d213c47c285b8af41098..2df440105a80f78e111f4f863261cbb7abab10ae 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/irqreturn.h>
+
 struct clocksource;
 struct clock_event_device;
 
index 2381df83bd0064287110896a2cfb0a2678a8a046..35932a8de8b8d299fd7aaebcf31d56eb7c6db6d3 100644 (file)
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
        case SC1TXIRQ:
 #ifdef CONFIG_MN10300_TTYSM1_TIMER12
        case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
        case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        case TM3IRQ:
 #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
 #endif /* CONFIG_MN10300_TTYSM1 */
index 890cf91767cc8486b4fd004cf8ca6e44c9b94a28..4d584ae29ae1c1c2c53746ee3a6d1d9b78e7042a 100644 (file)
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -163,7 +161,6 @@ asmlinkage long sys_sigreturn(void)
                             sizeof(frame->extramask)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(current_frame(), &frame->sc, &d0))
@@ -191,7 +188,6 @@ asmlinkage long sys_rt_sigreturn(void)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
@@ -430,8 +426,9 @@ static inline void stepback(struct pt_regs *regs)
  */
 static int handle_signal(int sig,
                         siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs)
+                        struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -461,11 +458,12 @@ static int handle_signal(int sig,
                ret = setup_rt_frame(sig, ka, info, oldset, regs);
        else
                ret = setup_frame(sig, ka, oldset, regs);
+       if (ret)
+               return ret;
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       signal_delivered(sig, info, ka, regs,
+                        test_thread_flag(TIF_SINGLESTEP));
+       return 0;
 }
 
 /*
@@ -475,7 +473,6 @@ static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
        /* we want the common case to go fast, which is why we may in certain
@@ -483,23 +480,9 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                                test_thread_flag(TIF_SINGLESTEP));
+               if (handle_signal(signr, &info, &ka, regs) == 0) {
                }
 
                return;
@@ -525,10 +508,7 @@ static void do_signal(struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -548,13 +528,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        }
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(current_frame());
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
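
The same conversion recurs in several signal.c files further down: the open-coded TIF_RESTORE_SIGMASK bookkeeping and the tracehook_signal_handler() call give way to the generic helpers sigmask_to_save(), signal_delivered() and restore_saved_sigmask(). A hedged skeleton of the resulting do_signal() flow, with setup_rt_frame() standing in for the architecture's frame-setup routine:

	#include <linux/sched.h>
	#include <linux/signal.h>
	#include <linux/ptrace.h>

	static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
				  sigset_t *oldset, struct pt_regs *regs);

	static void do_signal_sketch(struct pt_regs *regs)
	{
		struct k_sigaction ka;
		siginfo_t info;
		int signr;

		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
		if (signr > 0) {
			/* Build the frame with the sigmask that sigreturn restores. */
			if (setup_rt_frame(signr, &ka, &info, sigmask_to_save(), regs) == 0)
				signal_delivered(signr, &info, &ka, regs,
						 test_thread_flag(TIF_SINGLESTEP));
			return;
		}

		/* No signal to deliver: put the saved sigmask back. */
		restore_saved_sigmask();
	}
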
index 94a9c6d53e1b890ea98d987628fad089d5fd012d..b900e5afa0aefae7969666fbb1cc9cf2e77d5994 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/bug.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
index 159acb02cfd4c16dcd1e91f9268b8d9dc6e74463..e244ebe637e15436f643a63e28217d9714321645 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
index cc18fe7d8b90e2abc9061b430b7072386aeed0d8..c37f9832cf17d0a7cb5b8795766aa4c5feebd88e 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index 43c246439413b8c5f70d9b50d79b733443e42882..53677694b16554af24b8c8262191673426ae193c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/timex.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index 758af30d1a16aad5b74820431e5abda8cc4ed1df..4cefc224f448d98843fe44c7e67631e98fa37466 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index e1becd6b757132bd5664e82c5a676250ad660c3a..bc4adfaf815c1c2276c2f0f18bd84924b10c12d7 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index ddb7ed0107065e19132dc242cd5b42b361c80f21..42f32db75087cc36b2f089ee6cd7c1bd3629e35b 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index e970743251ae3c922fb4ff4f9722f4dc596f1f6f..30110297f4f9509d6437c8f5c3e80220add6e2a9 100644 (file)
@@ -33,8 +33,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 _sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
 {
@@ -101,7 +99,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -251,20 +248,19 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long sig,
              siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
        int ret;
 
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+       ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -339,30 +335,10 @@ void do_signal(struct pt_regs *regs)
        if (signr <= 0) {
                /* no signal to deliver so we just put the saved sigmask
                 * back */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
-
+               restore_saved_sigmask();
        } else {                /* signr > 0 */
-               sigset_t *oldset;
-
-               if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-
                /* Whee!  Actually deliver the signal.  */
-               if (!handle_signal(signr, &info, &ka, oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
-               tracehook_signal_handler(signr, &info, &ka, regs,
-                                        test_thread_flag(TIF_SINGLESTEP));
+               handle_signal(signr, &info, &ka, regs);
        }
 
        return;
@@ -376,7 +352,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs)
        if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index ddb8b24b823d1c77881f80cc019570c537ea6c62..3ff21b536f28f6c1e84b06b665e6d7589579f6c9 100644 (file)
@@ -18,6 +18,7 @@ config PARISC
        select IRQ_PER_CPU
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_STRNCPY_FROM_USER
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
index dbc3850b1d0dd3ea3a32d94aee31e6f1af2496ec..5707f1a62341d799a75136337ebb13fbfd1cde63 100644 (file)
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
 
 NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
+LIBGCC         = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE                := $(shell uname -m)
 ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y                      := mm/ kernel/ math-emu/
 kernel-$(CONFIG_HPUX)          += hpux/
 
 core-y += $(addprefix arch/parisc/, $(kernel-y))
-libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y += arch/parisc/lib/ $(LIBGCC)
 
 drivers-$(CONFIG_OPROFILE)             += arch/parisc/oprofile/
 
index 19a434f5505974e63f28a00d697dfd42b57f9345..4383707d9801eb7bfe2b1bb34be6bbea447be139 100644 (file)
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += pdc.h
+generic-y += word-at-a-time.h
index 72cfdb0cfdd157485e2acd4a27de5c7788b483c1..62a33338549c18d6bb32d0a08c7e0728808767fe 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _PARISC_BUG_H
 #define _PARISC_BUG_H
 
+#include <linux/kernel.h>      /* for BUGFLAG_TAINT */
+
 /*
  * Tell the user there is some problem.
  * The offending file and line are encoded in the __bug_table section.
index 5212b0357daf15aaf454b751b0eb146fc47b34ac..b9344256f76b365db1c6a9a99b08b337f706970c 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short         __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short         __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index e8f8037d872bc91c794f50ba5fd7a0f8eb996232..a5dc9066c6d8d50cb35f2e4cf88509fa4621b84b 100644 (file)
@@ -25,7 +25,6 @@ typedef unsigned long address_t;
 #define cpu_number_map(cpu)    (cpu)
 #define cpu_logical_map(cpu)   (cpu)
 
-extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
@@ -50,6 +49,5 @@ static inline void __cpu_die (unsigned int cpu) {
   while(1)
     ;
 }
-extern int __cpu_up (unsigned int cpu);
 
 #endif /*  __ASM_SMP_H */
index 9d5fbbc5c31f14df4791b1005e83ade0cd0505f4..d76fbda5d62c0437f5fb52c389e144597f054b12 100644 (file)
@@ -7,7 +7,7 @@ struct stat {
        unsigned int    st_dev;         /* dev_t is 32 bits on parisc */
        ino_t           st_ino;         /* 32 bits */
        mode_t          st_mode;        /* 16 bits */
-       nlink_t         st_nlink;       /* 16 bits */
+       unsigned short  st_nlink;       /* 16 bits */
        unsigned short  st_reserved1;   /* old st_uid */
        unsigned short  st_reserved2;   /* old st_gid */
        unsigned int    st_rdev;
@@ -42,7 +42,7 @@ struct hpux_stat64 {
        unsigned int    st_dev;         /* dev_t is 32 bits on parisc */
        ino_t           st_ino;         /* 32 bits */
        mode_t          st_mode;        /* 16 bits */
-       nlink_t         st_nlink;       /* 16 bits */
+       unsigned short  st_nlink;       /* 16 bits */
        unsigned short  st_reserved1;   /* old st_uid */
        unsigned short  st_reserved2;   /* old st_gid */
        unsigned int    st_rdev;
index 83ae7dd4d99ea721adbdc16a751435db902da354..22b4726dee494403c80bdf2f88c583ba32043b94 100644 (file)
@@ -74,7 +74,7 @@ struct thread_info {
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
-                                 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
+                                 _TIF_NEED_RESCHED)
 
 #endif /* __KERNEL__ */
 
index 9ac066086f030fc4080ea24be0d557afd7c8aae8..4ba2c93770f1f47c83226dc97b6e25c89d9b0bd6 100644 (file)
@@ -218,15 +218,14 @@ struct exception_data {
 extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
 extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
 extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
-extern long lstrncpy_from_user(char *, const char __user *, long);
+extern long strncpy_from_user(char *, const char __user *, long);
 extern unsigned lclear_user(void __user *,unsigned long);
 extern long lstrnlen_user(const char __user *,long);
-
 /*
  * Complex access routines -- macros
  */
+#define user_addr_max() (~0UL)
 
-#define strncpy_from_user lstrncpy_from_user
 #define strnlen_user lstrnlen_user
 #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
 #define clear_user lclear_user
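
With GENERIC_STRNCPY_FROM_USER selected and user_addr_max() defined, parisc switches from the hand-written lstrncpy_from_user assembly (removed further below) to the generic word-at-a-time implementation; callers keep the usual interface, getting the string length back, the full count if no NUL was found, or -EFAULT on an access fault. A minimal usage sketch:

	#include <linux/uaccess.h>

	static long copy_user_name(char *dst, const char __user *src, long len)
	{
		/* Length excluding the NUL, len if the buffer filled, or -EFAULT. */
		return strncpy_from_user(dst, src, len);
	}
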
index 5350342170218f635e4e231a4d10b1147210a1bf..18670a078849b96d92d4dd4ff406a79c2caa28a4 100644 (file)
         * entry (identifying the physical page) and %r23 up with
         * the from tlb entry (or nothing if only a to entry---for
         * clear_user_page_asm) */
-       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault
+       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
-#ifdef CONFIG_64BIT
+.ifc \patype,20
        depd,z          \prot,8,7,\prot
-#else
+.else
+.ifc \patype,11
        depw,z          \prot,8,7,\prot
-#endif
+.else
+       .error "undefined PA type to do_alias"
+.endif
+.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
@@ -920,7 +924,7 @@ intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19
-       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
+       ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */
 
@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
        nop
 
 dtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
 
        idtlbt          pte,prot
 
@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
        nop
 
 nadtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1245,7 +1249,7 @@ dtlb_miss_11:
        nop
 
 dtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
        nop
 
 nadtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1304,7 +1308,7 @@ dtlb_miss_20:
        nop
 
 dtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
        
        idtlbt          pte,prot
 
@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
        nop
 
 nadtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
        nop
 
 naitlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
@@ -1511,7 +1515,7 @@ naitlb_miss_11:
        nop
 
 naitlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,itlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11
 
        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)
@@ -1557,7 +1561,7 @@ naitlb_miss_20:
        nop
 
 naitlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
@@ -2028,7 +2032,7 @@ syscall_check_resched:
        .import do_signal,code
 syscall_check_sig:
        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
-       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+       ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
        and,COND(<>)    %r19, %r26, %r0
        b,n     syscall_restore /* skip past if we've nothing to do */
 
index a7bb757a5497137d894518d1dc9d252cb845bb49..ceec85de62904a1892c0fb30eca7eb034e627e53 100644 (file)
@@ -44,7 +44,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <asm/uaccess.h>
-EXPORT_SYMBOL(lstrncpy_from_user);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
index 4b9cb0d546d132fb39c5ba57d3986f3b109dff48..594459bde14ed3c927b7dddce88e1d9cf9863e74 100644 (file)
@@ -48,9 +48,6 @@
 #define DBG(LEVEL, ...)
 #endif
        
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* gcc will complain if a pointer is cast to an integer of different
  * size.  If you really need to do this (and we do for an ELF32 user
  * application in an ELF64 kernel) then you have to do a cast to an
@@ -131,7 +128,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
                        goto give_sigsegv;
        }
                
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        /* Good thing we saved the old gr[30], eh? */
@@ -443,8 +439,9 @@ give_sigsegv:
 
 static long
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+               struct pt_regs *regs, int in_syscall)
 {
+       sigset_t *oldset = sigmask_to_save();
        DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
               sig, ka, info, oldset, regs);
        
@@ -452,12 +449,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
                return 0;
 
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs, 
+       signal_delivered(sig, info, ka, regs, 
                test_thread_flag(TIF_SINGLESTEP) ||
                test_thread_flag(TIF_BLOCKSTEP));
 
+       DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+               regs->gr[28]);
+
        return 1;
 }
 
@@ -568,28 +566,17 @@ do_signal(struct pt_regs *regs, long in_syscall)
        siginfo_t info;
        struct k_sigaction ka;
        int signr;
-       sigset_t *oldset;
 
-       DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
-              oldset, regs, regs->sr[7], in_syscall);
+       DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+              regs, regs->sr[7], in_syscall);
 
        /* Everyone else checks to see if they are in kernel mode at
           this point and exits if that's the case.  I'm not sure why
           we would be called in that case, but for some reason we
           are. */
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
-       DBG(1,"do_signal: oldset %08lx / %08lx\n", 
-               oldset->sig[0], oldset->sig[1]);
-
-
        /* May need to force signal if handle_signal failed to deliver */
        while (1) {
-         
                signr = get_signal_to_deliver(&info, &ka, regs, NULL);
                DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); 
        
@@ -603,14 +590,8 @@ do_signal(struct pt_regs *regs, long in_syscall)
                /* Whee!  Actually deliver the signal.  If the
                   delivery failed, we need to continue to iterate in
                   this loop so we can deliver the SIGSEGV... */
-               if (handle_signal(signr, &info, &ka, oldset,
-                                 regs, in_syscall)) {
-                       DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
-                               regs->gr[28]);
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               if (handle_signal(signr, &info, &ka, regs, in_syscall))
                        return;
-               }
        }
        /* end of while(1) looping forever if we can't force a signal */
 
@@ -621,24 +602,16 @@ do_signal(struct pt_regs *regs, long in_syscall)
        DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", 
                regs->gr[28]);
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
-       return;
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, long in_syscall)
 {
-       if (test_thread_flag(TIF_SIGPENDING) ||
-           test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs, in_syscall);
 
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index e14132430762414166a729ae57e647ec5b9f1316..fd49aeda9eb8b0c4fef656e3dc00228c59251b99 100644 (file)
@@ -47,8 +47,6 @@
 #define DBG(LEVEL, ...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 inline void
 sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
 {
index fa6f2b8163e03cc1bdc953bbb1e2b8282b826c7a..64a999882e4fb8d0d584da223c7b1f43842e8d2c 100644 (file)
@@ -50,8 +50,10 @@ SECTIONS
        . = KERNEL_BINARY_TEXT_START;
 
        _text = .;              /* Text and read-only data */
-       .text ALIGN(16) : {
+       .head ALIGN(16) : {
                HEAD_TEXT
+       } = 0
+       .text ALIGN(16) : {
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
@@ -65,7 +67,7 @@ SECTIONS
                *(.fixup)
                *(.lock.text)           /* out-of-line lock text */
                *(.gnu.warning)
-       } = 0
+       }
        /* End of text section */
        _etext = .;
 
index 1bd23ccec17b9a53fb838fdfdf5eb9ab62ccf739..6f2d9355efe25af6ab90d4205a216c1c649c39a9 100644 (file)
        bv          %r0(%r1)
        .endm
 
-       /*
-        * long lstrncpy_from_user(char *dst, const char *src, long n)
-        *
-        * Returns -EFAULT if exception before terminator,
-        *         N if the entire buffer filled,
-        *         otherwise strlen (i.e. excludes zero byte)
-        */
-
-ENTRY(lstrncpy_from_user)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-       comib,=     0,%r24,$lsfu_done
-       copy        %r24,%r23
-       get_sr
-1:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_loop:
-       stbs,ma     %r1,1(%r26)
-       comib,=,n   0,%r1,$lsfu_done
-       addib,<>,n  -1,%r24,$lsfu_loop
-2:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_done:
-       sub         %r23,%r24,%r28
-$lsfu_exit:
-       bv          %r0(%r2)
-       nop
-       .exit
-ENDPROC(lstrncpy_from_user)
-
-       .section .fixup,"ax"
-3:      fixup_branch $lsfu_exit
-       ldi         -EFAULT,%r28
-       .previous
-
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,3b
-       ASM_ULONG_INSN 2b,3b
-       .previous
-
-       .procend
-
        /*
         * unsigned long lclear_user(void *to, unsigned long n)
         *
index c9aac24b02e267ab9d171af7d1087863e0ef0dc8..0554ab062bdc555bdd1d28fb655519b8a39fcbe5 100644 (file)
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()    __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()   __mtmsrd(local_paca->kernel_msr, 1)
@@ -100,6 +100,14 @@ static inline void hard_irq_disable(void)
        get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable       hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+       return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
@@ -117,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
        return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)  mtmsr(x)
index f1393252bbdad837c97b794c8534328a9c912ee3..2958c5b97b2dd4100ac5907129b4736d94458cf7 100644 (file)
@@ -16,9 +16,6 @@ typedef int           __kernel_ssize_t;
 typedef long           __kernel_ptrdiff_t;
 #define __kernel_size_t __kernel_size_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef short          __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #endif
index e4edc510b530cfed6420e96012a69dc424a34e79..84880b80cc1ce7811924ec18e68bbd75431371f6 100644 (file)
@@ -30,11 +30,11 @@ struct stat {
        unsigned long   st_dev;
        ino_t           st_ino;
 #ifdef __powerpc64__
-       nlink_t         st_nlink;
+       unsigned long   st_nlink;
        mode_t          st_mode;
 #else
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       unsigned short  st_nlink;
 #endif
        uid_t           st_uid;
        gid_t           st_gid;
index a556ccc16b58d4560b004c1ee797f8e92c7c6506..68831e9cf82f01ddf8f12164962d8db438b93af3 100644 (file)
@@ -140,7 +140,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->local_flags |= _TLF_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
+               return false;
+       ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
+       return true;
 }
 
 static inline bool test_thread_local_flags(unsigned int flags)
index ed1718feb9d9c5e8ee2c55241663d31d99717e15..5971c85df1369780decc5dbc6feb0b10f9d23a05 100644 (file)
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
        mtmsrd  r10,1             /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
-       li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
-       /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-       rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-       and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
-       bne     do_work
-
-#else /* !CONFIG_PREEMPT */
-       ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
-       beq     restore         /* if not, just restore regs and return */
+       beq     resume_kernel
 
        /* Check current_thread_info()->flags */
+       andi.   r0,r4,_TIF_USER_WORK_MASK
+       beq     restore
+
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq     1f
+       bl      .restore_interrupts
+       bl      .schedule
+       b       .ret_from_except_lite
+
+1:     bl      .save_nvgprs
+       bl      .restore_interrupts
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_notify_resume
+       b       .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+       /* Check if we need to preempt */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq+    restore
+       /* Check that preempt_count() == 0 and interrupts are enabled */
+       lwz     r8,TI_PREEMPT(r9)
+       cmpwi   cr1,r8,0
+       ld      r0,SOFTE(r1)
+       cmpdi   r0,0
+       crandc  eq,cr1*4+eq,eq
+       bne     restore
+
+       /*
+        * Here we are preempting the current task. We want to make
+        * sure we are soft-disabled first
+        */
+       SOFT_DISABLE_INTS(r3,r4)
+1:     bl      .preempt_schedule_irq
+
+       /* Re-test flags and eventually loop */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_USER_WORK_MASK
-       bne     do_work
-#endif /* !CONFIG_PREEMPT */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       bne     1b
+#endif /* CONFIG_PREEMPT */
 
        .globl  fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:     b       .ret_from_except /* What else to do here ? */
  
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-       andi.   r0,r3,MSR_PR    /* Returning to user mode? */
-       bne     user_work
-       /* Check that preempt_count() == 0 and interrupts are enabled */
-       lwz     r8,TI_PREEMPT(r9)
-       cmpwi   cr1,r8,0
-       ld      r0,SOFTE(r1)
-       cmpdi   r0,0
-       crandc  eq,cr1*4+eq,eq
-       bne     restore
-
-       /*
-        * Here we are preempting the current task. We want to make
-        * sure we are soft-disabled first
-        */
-       SOFT_DISABLE_INTS(r3,r4)
-1:     bl      .preempt_schedule_irq
-
-       /* Re-test flags and eventually loop */
-       clrrdi  r9,r1,THREAD_SHIFT
-       ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       bne     1b
-       b       restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       beq     1f
-       bl      .restore_interrupts
-       bl      .schedule
-       b       .ret_from_except_lite
-
-1:     bl      .save_nvgprs
-       bl      .restore_interrupts
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .do_notify_resume
-       b       .ret_from_except
-
 unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
index 7835a5e1ea5fed3a33bbc7878b0c9e6c26796489..1f017bb7a7cebfc3278fecfd7abe8f3dfaada107 100644 (file)
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
                __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       hard_irq_disable();
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       /*
+        * Mark interrupts as soft-enabled and clear the
+        * PACA_IRQ_HARD_DIS from the pending mask since we
+        * are about to hard enable as well as a side effect
+        * of entering the low power state.
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       local_paca->soft_enabled = 1;
+
+       /* Tell the caller to enter the low power state */
+       return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
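
prep_irq_for_idle() is meant to be called from a platform power_save hook with interrupts soft-disabled; it either reports pending work (returns false) or leaves the lazy-IRQ state consistent for a low-power entry that re-enables interrupts as a side effect. A hedged usage sketch, where enter_low_power() is a hypothetical stand-in for something like an H_CEDE call:

	extern bool prep_irq_for_idle(void);
	extern void enter_low_power(void);	/* hypothetical; e.g. H_CEDE under pHyp */

	static void example_power_save(void)
	{
		if (!prep_irq_for_idle())
			return;			/* something is pending; stay awake */

		enter_low_power();		/* may hard-enable interrupts */
	}
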
index 0b6d79617d7b08adb56c05286a167931cca6a3a9..2e3200ca485f7bab8aba23f4d02948f74eacee69 100644 (file)
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-       if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-           && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+       if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+           && entry->jump[1] == 0x398c0000 + (val & 0xffff))
                return 1;
        return 0;
 }
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
                entry++;
        }
 
-       /* Stolen from Paul Mackerras as well... */
-       entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
-       entry->jump[1] = 0x396b0000 + (val&0xffff);     /* addi r11,r11,sym@l*/
-       entry->jump[2] = 0x7d6903a6;                    /* mtctr r11 */
+       entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+       entry->jump[1] = 0x398c0000 + (val&0xffff);     /* addi r12,r12,sym@l*/
+       entry->jump[2] = 0x7d8903a6;                    /* mtctr r12 */
        entry->jump[3] = 0x4e800420;                    /* bctr */
 
        DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
index 1b488e5305c5f2d1cdfa889f3dfafead154b258c..0794a3017b1b53e65e4d1aa325b79711a9fb04b1 100644 (file)
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
        long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
        prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
        struct opal_secondary_data *data = &RELOC(opal_secondary_data);
        struct opal_takeover_args *args = &data->args;
index 651c5963662b68ed098c04d1dbdc512f06f2fac1..5c023c9cf16ee70a7a3b281af2ad9807028c3b13 100644 (file)
@@ -51,16 +51,6 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
         return (void __user *)newsp;
 }
 
-
-/*
- * Restore the user process's signal mask
- */
-void restore_sigmask(sigset_t *set)
-{
-       sigdelsetmask(set, ~_BLOCKABLE);
-       set_current_blocked(set);
-}
-
 static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
                                  int has_handler)
 {
@@ -114,30 +104,21 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 
 static int do_signal(struct pt_regs *regs)
 {
-       sigset_t *oldset;
+       sigset_t *oldset = sigmask_to_save();
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
        int ret;
        int is32 = is_32bit_task();
 
-       if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        /* Is there any syscall restart business here ? */
        check_syscall_restart(regs, &ka, signr > 0);
 
        if (signr <= 0) {
-               struct thread_info *ti = current_thread_info();
                /* No signal to deliver -- put the saved sigmask back */
-               if (ti->local_flags & _TLF_RESTORE_SIGMASK) {
-                       ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
+               restore_saved_sigmask();
                regs->trap = 0;
                return 0;               /* no signals delivered */
        }
@@ -167,18 +148,7 @@ static int do_signal(struct pt_regs *regs)
 
        regs->trap = 0;
        if (ret) {
-               block_sigmask(&ka, signr);
-
-               /*
-                * A signal was successfully delivered; the saved sigmask is in
-                * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-
-               /*
-                * Let tracing know that we've done the handler setup.
-                */
-               tracehook_signal_handler(signr, &info, &ka, regs,
+               signal_delivered(signr, &info, &ka, regs,
                                         test_thread_flag(TIF_SINGLESTEP));
        }
 
@@ -193,8 +163,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 8dde973aaaf513ffd4c39a41f0f3bf2bead0d7db..e00acb4139346074ddfbda1c6553010614a2d78c 100644 (file)
 #ifndef _POWERPC_ARCH_SIGNAL_H
 #define _POWERPC_ARCH_SIGNAL_H
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
 extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
                                  size_t frame_size, int is_32);
-extern void restore_sigmask(sigset_t *set);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
                           siginfo_t *info, sigset_t *oldset,
index 61f6aff25edc3f94010bda48710d6a1f27e65161..8b4c049aee20e8604fa83a9a017366213d0ab6d9 100644 (file)
@@ -919,7 +919,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
        if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
                return -EFAULT;
 #endif
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_user_regs(regs, mcp, sig))
                return -EFAULT;
 
@@ -1273,7 +1273,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        set.sig[0] = sigctx.oldmask;
        set.sig[1] = sigctx._unused[3];
 #endif
-       restore_sigmask(&set);
+       set_current_blocked(&set);
 
        sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
        addr = sr;
index 2692efdb154e210a5aa787e021ed960cb3bde591..d183f8719a505ce18e4cc8cb06ee4df3e8fa8674 100644 (file)
@@ -335,7 +335,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 
        if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
                do_exit(SIGSEGV);
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
                do_exit(SIGSEGV);
 
@@ -364,7 +364,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 
        if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto badframe;
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
                goto badframe;
 
index 99a995c2a3f2496da4e2e48e807b7354488ff2c7..be171ee73bf8cd3bef83630fbc031ad4f29410e7 100644 (file)
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
        struct pt_regs *old_regs;
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
        struct clock_event_device *evt = &__get_cpu_var(decrementers);
+       u64 now;
 
        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
                irq_work_run();
        }
 
-       *next_tb = ~(u64)0;
-       if (evt->event_handler)
-               evt->event_handler(evt);
+       now = get_tb_or_rtc();
+       if (now >= *next_tb) {
+               *next_tb = ~(u64)0;
+               if (evt->event_handler)
+                       evt->event_handler(evt);
+       } else {
+               now = *next_tb - now;
+               if (now <= DECREMENTER_MAX)
+                       set_dec((int)now);
+       }
 
 #ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
index c6af1d6238395947725a2e53ff0fbd6d6614b2e7..3abe1b86e58344060361f3efe933b8e119c415e3 100644 (file)
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
        return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+       struct kvm *kvm = vcpu->kvm;
        void *va;
        unsigned long nb;
+       unsigned long gpa;
 
-       vpap->update_pending = 0;
-       va = NULL;
-       if (vpap->next_gpa) {
-               va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-               if (nb < vpap->len) {
-                       /*
-                        * If it's now too short, it must be that userspace
-                        * has changed the mappings underlying guest memory,
-                        * so unregister the region.
-                        */
+       /*
+        * We need to pin the page pointed to by vpap->next_gpa,
+        * but we can't call kvmppc_pin_guest_page under the lock
+        * as it does get_user_pages() and down_read().  So we
+        * have to drop the lock, pin the page, then get the lock
+        * again and check that a new area didn't get registered
+        * in the meantime.
+        */
+       for (;;) {
+               gpa = vpap->next_gpa;
+               spin_unlock(&vcpu->arch.vpa_update_lock);
+               va = NULL;
+               nb = 0;
+               if (gpa)
+                       va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+               spin_lock(&vcpu->arch.vpa_update_lock);
+               if (gpa == vpap->next_gpa)
+                       break;
+               /* sigh... unpin that one and try again */
+               if (va)
                        kvmppc_unpin_guest_page(kvm, va);
-                       va = NULL;
-               }
+       }
+
+       vpap->update_pending = 0;
+       if (va && nb < vpap->len) {
+               /*
+                * If it's now too short, it must be that userspace
+                * has changed the mappings underlying guest memory,
+                * so unregister the region.
+                */
+               kvmppc_unpin_guest_page(kvm, va);
+               va = NULL;
        }
        if (vpap->pinned_addr)
                kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
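
The loop above follows the usual unlock / sleep / relock / revalidate shape: kvmppc_pin_guest_page() may sleep in get_user_pages()/down_read(), so it cannot run under the spinlock, and re-checking next_gpa after re-taking the lock catches a registration that raced in while the lock was dropped. A generic sketch of that shape (the struct and the pin/unpin helpers are illustrative, not from this patch):

	#include <linux/spinlock.h>

	struct area { unsigned long next_gpa; };

	static void *pin_current_area(struct area *a, spinlock_t *lock,
				      void *(*pin)(unsigned long),
				      void (*unpin)(void *))
	{
		unsigned long gpa;
		void *va;

		for (;;) {
			gpa = a->next_gpa;		/* snapshot under the lock */
			spin_unlock(lock);
			va = gpa ? pin(gpa) : NULL;	/* may sleep */
			spin_lock(lock);
			if (gpa == a->next_gpa)		/* unchanged while unlocked */
				return va;		/* still holding the lock */
			if (va)
				unpin(va);		/* raced: drop and retry */
		}
	}
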
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
-
        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.vpa.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
                init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
        }
        if (vcpu->arch.dtl.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
                vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
                vcpu->arch.dtl_index = 0;
        }
        if (vcpu->arch.slb_shadow.update_pending)
-               kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
        spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        struct kvm_vcpu *vcpu, *vcpu0, *vnext;
        long ret;
        u64 now;
-       int ptid, i;
+       int ptid, i, need_vpa_update;
 
        /* don't start if any threads have a signal pending */
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+       need_vpa_update = 0;
+       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                if (signal_pending(vcpu->arch.run_task))
                        return 0;
+               need_vpa_update |= vcpu->arch.vpa.update_pending |
+                       vcpu->arch.slb_shadow.update_pending |
+                       vcpu->arch.dtl.update_pending;
+       }
+
+       /*
+        * Initialize *vc, in particular vc->vcore_state, so we can
+        * drop the vcore lock if necessary.
+        */
+       vc->n_woken = 0;
+       vc->nap_count = 0;
+       vc->entry_exit_count = 0;
+       vc->vcore_state = VCORE_RUNNING;
+       vc->in_guest = 0;
+       vc->napping_threads = 0;
+
+       /*
+        * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+        * which can't be called with any spinlocks held.
+        */
+       if (need_vpa_update) {
+               spin_unlock(&vc->lock);
+               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+                       kvmppc_update_vpas(vcpu);
+               spin_lock(&vc->lock);
+       }
 
        /*
         * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
                if (vcpu->arch.ceded)
                        vcpu->arch.ptid = ptid++;
 
-       vc->n_woken = 0;
-       vc->nap_count = 0;
-       vc->entry_exit_count = 0;
-       vc->vcore_state = VCORE_RUNNING;
        vc->stolen_tb += mftb() - vc->preempt_tb;
-       vc->in_guest = 0;
        vc->pcpu = smp_processor_id();
-       vc->napping_threads = 0;
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
-               if (vcpu->arch.vpa.update_pending ||
-                   vcpu->arch.slb_shadow.update_pending ||
-                   vcpu->arch.dtl.update_pending)
-                       kvmppc_update_vpas(vcpu);
                kvmppc_create_dtl_entry(vcpu, vc);
        }
        /* Grab any remaining hw threads so they can't go into the kernel */
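
Two related changes are visible in this pair of hunks: the per-vcpu "update pending" flags are collected in the same pass that checks for pending signals, and the vcore bookkeeping (n_woken, vcore_state and friends) is now initialized before the first point at which vc->lock may be dropped, because kvmppc_update_vpas() ends up in kvmppc_pin_guest_page(), which can sleep. A short sketch of the rule being followed, namely initialize shared state before any unlock; struct core, prepare_slow() and the state names are illustrative:

	#include <pthread.h>

	enum core_state { CORE_INACTIVE, CORE_RUNNING };

	struct core {
		pthread_mutex_t lock;
		enum core_state state;
		int entry_exit_count;
	};

	void prepare_slow(struct core *c);	/* hypothetical, may sleep */

	void run_core(struct core *c, int need_prepare)
	{
		pthread_mutex_lock(&c->lock);

		/* Make the object self-consistent before the first possible
		 * unlock, so whoever takes the lock while we sleep sees a
		 * sane, "running" core rather than stale leftovers. */
		c->state = CORE_RUNNING;
		c->entry_exit_count = 0;

		if (need_prepare) {
			pthread_mutex_unlock(&c->lock);
			prepare_slow(c);
			pthread_mutex_lock(&c->lock);
		}

		/* ... enter the guest here ... */

		c->state = CORE_INACTIVE;
		pthread_mutex_unlock(&c->lock);
	}
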
index a84aafce2a129e311a0943be0975db3823b88402..a1044f43becd380cdc7216e082fbf267b97a3fae 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
index 3ff9013d6e7914e59f2919f5ee5bf685ad7d7797..ee02b30878ed4bec733af6cbd9aa152eefe22d0f 100644 (file)
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
+               vcpu->arch.shared->msr |= MSR_EE;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu->stat.halt_wakeup++;
index 5b63bd3da4a968fab738434d0a83a7db2f722dc8..e779642c25e5e3192a39a4b167d505a8cfb63254 100644 (file)
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
-       struct task_struct *p;
-#endif
+
        /* We don't touch CPU 0 map, it's allocated at boot and kept
         * around forever
         */
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                stale_map[cpu] = NULL;
 
                /* We also clear the cpu_vm_mask bits of CPUs going away */
-               read_lock(&tasklist_lock);
-               for_each_process(p) {
-                       if (p->mm)
-                               cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-               }
-               read_unlock(&tasklist_lock);
+               clear_tasks_mm_cpumask(cpu);
        break;
 #endif /* CONFIG_HOTPLUG_CPU */
        }
index b6edbb3b4a54f96aa46a58e603d927c874806449..1e95556dc692e3702f2df518dd4e08e7770ae52e 100644 (file)
@@ -635,11 +635,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *dm, *usm;
+       const u32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
-       struct assoc_arrays aa;
+       struct assoc_arrays aa = { .arrays = NULL };
 
        n = of_get_drconf_memory(memory, &dm);
        if (!n)
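
Both changes here read like fixes for false-positive "may be used uninitialized" warnings: dm is only dereferenced after of_get_drconf_memory() succeeds, and aa.arrays is likewise filled in before use, but the compiler cannot see that. uninitialized_var() was the kernel's way of acknowledging such a warning without generating extra code; it was defined roughly as shown below, and the helper get_value() in the usage sketch is hypothetical:

	/* Roughly the kernel's definition of the day (compiler*.h): */
	#define uninitialized_var(x) x = x

	int get_value(int *v);			/* hypothetical, sets *v and returns 0 on success */

	int parse(void)
	{
		int uninitialized_var(val);	/* expands to: int val = val; silences the warning */

		if (get_value(&val) != 0)
			return -1;
		return val;			/* only read after get_value() has set it */
	}
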
index 55ba3855a97f58093ec06f343cde1df2dffbb17b..7d3a3b5619a2b8f2528b61dc7077b4dd84c594d2 100644 (file)
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
        mr      r4, r_addr;                                     \
        li      r6, SIZE;                                       \
        bl      skb_copy_bits;                                  \
+       nop;                                                    \
        /* R3 = 0 on success */                                 \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
        mr      r4, r_addr;                                     \
        li      r5, SIZE;                                       \
        bl      bpf_internal_load_pointer_neg_helper;           \
+       nop;                                                    \
        /* R3 != 0 on success */                                \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
index efdacc829576582a0b4da52384f80ad62082cd4e..d17e98bc0c10d3ff0469cfa8af873f08d57abded 100644 (file)
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
        unsigned long ctrl, thread_switch_control;
 
-       /*
-        * We need to hard disable interrupts, the local_irq_enable() done by
-        * our caller upon return will hard re-enable.
-        */
-       hard_irq_disable();
+       /* Ensure our interrupt state is properly tracked */
+       if (!prep_irq_for_idle())
+               return;
 
        ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
         */
        ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
        mtspr(SPRN_CTRLT, ctrl);
+
+       /* Re-enable interrupts in MSR */
+       __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
index 0915b1ad66ce0dbb9e893ddd54a51753029e8a67..2d311c0caf8ef4499a3a54ebdb24f0c4e41c8735 100644 (file)
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
                tcep++;
        }
 
-       if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+       if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
        return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
        while (npages--)
                *(tcep++) = 0;
 
-       if (tbl->it_type == TCE_PCI_SWINV_FREE)
+       if (tbl->it_type & TCE_PCI_SWINV_FREE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
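The two one-character fixes above are the classic bit-flag bug: it_type is treated as a mask that can carry several TCE_PCI_* bits at once, so comparing it with == only matches when the tested bit is the only one set, while & checks the bit regardless of what else is present. A standalone illustration, with flag values made up for the example:

	#include <stdio.h>

	#define SWINV_CREATE 0x1
	#define SWINV_FREE   0x2

	int main(void)
	{
		unsigned int type = SWINV_CREATE | SWINV_FREE;	/* both bits set */

		/* Wrong: only true if CREATE is the *only* bit set. */
		printf("== test: %d\n", type == SWINV_CREATE);		/* prints 0 */

		/* Right: true whenever the CREATE bit is present. */
		printf("&  test: %d\n", (type & SWINV_CREATE) != 0);	/* prints 1 */
		return 0;
	}
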
index 36f957f31842f60688a7485f297ed6181dc926c9..8733a86ad52ed6dbb2b0fc9030b08852a3ca2b62 100644 (file)
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len);
+                         enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
        .dump = oops_to_nvram
@@ -503,28 +501,6 @@ int __init pSeries_nvram_init(void)
        return 0;
 }
 
-/*
- * Try to capture the last capture_len bytes of the printk buffer.  Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-                               const char *new_msgs, size_t new_len,
-                               char *captured, size_t capture_len)
-{
-       if (new_len >= capture_len) {
-               memcpy(captured, new_msgs + (new_len - capture_len),
-                                                               capture_len);
-               return capture_len;
-       } else {
-               /* Grab the end of old_msgs. */
-               size_t old_tail_len = min(old_len, capture_len - new_len);
-               memcpy(captured, old_msgs + (old_len - old_tail_len),
-                                                               old_tail_len);
-               memcpy(captured + old_tail_len, new_msgs, new_len);
-               return old_tail_len + new_len;
-       }
-}
-
 /*
  * Are we using the ibm,rtas-log for oops/panic reports?  And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
                                                NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-       char *in, *out, *buf_end = buf + len;
-       /* Assume a <n> at the very beginning marks the start of a line. */
-       int newline = 1;
-
-       in = out = buf;
-       while (in < buf_end) {
-               if (newline && in+3 <= buf_end &&
-                               *in == '<' && isdigit(in[1]) && in[2] == '>') {
-                       in += 3;
-                       newline = 0;
-               } else {
-                       newline = (*in == '\n');
-                       *out++ = *in++;
-               }
-       }
-       return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
                                                        size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
  * partition.  If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len)
+                         enum kmsg_dump_reason reason)
 {
        static unsigned int oops_count = 0;
        static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                return;
 
        if (big_oops_buf) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                       new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-               text_len = elide_severities(big_oops_buf, text_len);
+               kmsg_dump_get_buffer(dumper, false,
+                                    big_oops_buf, big_oops_buf_sz, &text_len);
                rc = zip_oops(text_len);
        }
        if (rc != 0) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                               new_msgs, new_len, oops_data, oops_data_sz);
+               kmsg_dump_rewind(dumper);
+               kmsg_dump_get_buffer(dumper, true,
+                                    oops_data, oops_data_sz, &text_len);
                err_type = ERR_TYPE_KERNEL_PANIC;
                *oops_len = (u16) text_len;
        }
index 41a34bc4a9a2903e3d9bb747176cfde009f16435..c71be66bd5dc2ff3bc6e0b3309ad969859cb4b84 100644 (file)
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
        /*
-        * Interrupts are soft-disabled at this point,
-        * but not hard disabled. So an interrupt might have
-        * occurred before entering NAP, and would be potentially
-        * lost (edge events, decrementer events, etc...) unless
-        * we first hard disable then check.
+        * Ensure our interrupt state is properly tracked;
+        * this also checks that no interrupt has occurred while we
+        * were soft-disabled
         */
-       hard_irq_disable();
-       if (get_paca()->irq_happened == 0)
+       if (prep_irq_for_idle()) {
                cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+               /* Ensure that H_CEDE returns with IRQs on */
+               if (WARN_ON(!(mfmsr() & MSR_EE)))
+                       __hard_irq_enable();
+#endif
+       }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
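
Both idle-path hunks in this merge (cbe_power_save earlier and check_and_cede_processor here) replace the open-coded hard_irq_disable()-then-check sequence with prep_irq_for_idle(), which, per the new comments, bundles the lazy-irq bookkeeping with the "bail out if an interrupt already arrived while soft-disabled" test. The calling pattern, sketched with the helpers named in these hunks; enter_low_power_state() is hypothetical, and the exact prototypes live in powerpc headers not shown in this diff:

	#include <linux/types.h>

	bool prep_irq_for_idle(void);		/* from these hunks: false if an irq is already pending */
	void __hard_irq_enable(void);		/* from these hunks */
	void enter_low_power_state(void);	/* hypothetical: nap, cede, pause ... */

	static void my_idle(void)
	{
		/* Do not enter low power at all if an interrupt fired while
		 * we were soft-disabled. */
		if (!prep_irq_for_idle())
			return;

		enter_low_power_state();

		/* cbe_power_save re-enables MSR.EE itself on the way out;
		 * the pSeries cede path expects H_CEDE to return with EE set. */
		__hard_irq_enable();
	}
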
index 0f3ab06d222260c63d5866e6d6b7ab0ac643e761..eab3492a45c5c5244eca42d58354cf6d8a092836 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index b403c533432c94438260df3b524434efdb6db4ce..a39b4690c171621e78e2183c6b1b97bd25f4afaf 100644 (file)
@@ -87,6 +87,7 @@ config S390
        select ARCH_SAVE_PAGE_KEYS if HIBERNATION
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
+       select HAVE_CMPXCHG_LOCAL
        select ARCH_DISCARD_MEMBLOCK
        select ARCH_INLINE_SPIN_TRYLOCK
        select ARCH_INLINE_SPIN_TRYLOCK_BH
index e5beb490959bcea55fce5e2051061db7d0885bea..a6ff5a83e227279fe6e49dd9b6420eeca3400606 100644 (file)
@@ -13,8 +13,6 @@
  *
  */
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -63,7 +61,7 @@ extern const char _ni_bitmap[];
 extern const char _zb_findmap[];
 extern const char _sb_findmap[];
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define __BITOPS_ALIGN         3
 #define __BITOPS_WORDSIZE      32
@@ -83,7 +81,7 @@ extern const char _sb_findmap[];
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __BITOPS_ALIGN         7
 #define __BITOPS_WORDSIZE      64
@@ -103,7 +101,7 @@ extern const char _sb_findmap[];
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
 #define __BITOPS_BARRIER() asm volatile("" : : : "memory")
@@ -412,7 +410,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
        unsigned long bytes = 0;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
@@ -449,7 +447,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
        unsigned long bytes = 0;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
@@ -481,7 +479,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
  */
 static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0xffffffff) {
                word >>= 32;
                nr += 32;
@@ -505,7 +503,7 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
  */
 static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0) {
                word >>= 32;
                nr += 32;
@@ -546,7 +544,7 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
        unsigned long word;
 
        p = (unsigned long *)((unsigned long) p + offset);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       ic      %0,%O1(%R1)\n"
                "       icm     %0,2,%O1+1(%R1)\n"
@@ -834,7 +832,4 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
-
-#endif /* __KERNEL__ */
-
 #endif /* _S390_BITOPS_H */
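
Most of the remaining s390 header churn in this merge is two mechanical cleanups: the #ifdef __KERNEL__ guards go away (these headers are apparently not exported to user space, so the guard protected nothing), and tests on the compiler-defined __s390x__ macro become tests on CONFIG_64BIT, so the 31-bit/64-bit split follows the kernel configuration rather than the toolchain. The resulting pattern, shown once so the later hunks can be skimmed; the constant is taken from the bitops hunk above:

	/* After the cleanup: key 31-bit vs 64-bit variants off Kconfig. */
	#ifndef CONFIG_64BIT
	#define __BITOPS_WORDSIZE	32	/* 31-bit s390 kernel */
	#else
	#define __BITOPS_WORDSIZE	64	/* 64-bit s390x kernel */
	#endif
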
index fc50a3342da3ba726f35db37df75110897c50478..4c8d4d5b8bd2ca545a1e35dcacaa4f45b115c083 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/spinlock.h>
 #include <asm/types.h>
 
-#ifdef __KERNEL__
-
 #define LPM_ANYPATH 0xff
 #define __MAX_CSSID 0
 
@@ -291,5 +289,3 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
 int chsc_sstpi(void *page, void *result, size_t size);
 
 #endif
-
-#endif
index 81d7908416cf769202a40541d6a1779560712df5..8d798e962b632c9a8aa426576077a48d1f6f0a38 100644 (file)
@@ -29,7 +29,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
-                       : "d" (x << shift), "d" (~(255 << shift)),
+                       : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 2:
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
-                       : "d" (x << shift), "d" (~(65535 << shift)),
+                       : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 4:
@@ -113,9 +113,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
-                       : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                       : "d" (old << shift), "d" (new << shift),
-                         "d" (~(255 << shift)), "Q" (*(int *) ptr)
+                       : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+                       : "d" ((old & 0xff) << shift),
+                         "d" ((new & 0xff) << shift),
+                         "d" (~(0xff << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 2:
@@ -134,9 +135,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
-                       : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                       : "d" (old << shift), "d" (new << shift),
-                         "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+                       : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+                       : "d" ((old & 0xffff) << shift),
+                         "d" ((new & 0xffff) << shift),
+                         "d" (~(0xffff << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 4:
@@ -160,9 +162,14 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
        return old;
 }
 
-#define cmpxchg(ptr, o, n)                                             \
-       ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
-                                      (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)                                              \
+({                                                                      \
+       __typeof__(*(ptr)) __ret;                                        \
+       __ret = (__typeof__(*(ptr)))                                     \
+               __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
+                         sizeof(*(ptr)));                               \
+       __ret;                                                           \
+})
 
 #ifdef CONFIG_64BIT
 #define cmpxchg64(ptr, o, n)                                           \
@@ -181,13 +188,19 @@ static inline unsigned long long __cmpxchg64(void *ptr,
                "       cds     %0,%2,%1"
                : "+&d" (rp_old), "=Q" (ptr)
                : "d" (rp_new), "Q" (ptr)
-               : "cc");
+               : "memory", "cc");
        return rp_old.pair;
 }
-#define cmpxchg64(ptr, o, n)                                           \
-       ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
-                                        (unsigned long long)(o),       \
-                                        (unsigned long long)(n)))
+
+#define cmpxchg64(ptr, o, n)                           \
+({                                                     \
+       __typeof__(*(ptr)) __ret;                       \
+       __ret = (__typeof__(*(ptr)))                    \
+               __cmpxchg64((ptr),                      \
+                           (unsigned long long)(o),    \
+                           (unsigned long long)(n));   \
+       __ret;                                          \
+})
 #endif /* CONFIG_64BIT */
 
 #include <asm-generic/cmpxchg-local.h>
@@ -216,8 +229,13 @@ static inline unsigned long __cmpxchg_local(void *ptr,
  * them available.
  */
 #define cmpxchg_local(ptr, o, n)                                       \
-       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-                       (unsigned long)(n), sizeof(*(ptr))))
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __ret = (__typeof__(*(ptr)))                                    \
+               __cmpxchg_local((ptr), (unsigned long)(o),              \
+                               (unsigned long)(n), sizeof(*(ptr)));    \
+       __ret;                                                          \
+})
 
 #define cmpxchg64_local(ptr, o, n)     cmpxchg64((ptr), (o), (n))
 
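Two independent fixes are folded into this cmpxchg.h diff. First, the 1- and 2-byte __xchg/__cmpxchg paths now mask their operands to the target width before shifting: the generated sequence builds the new word as (old & ~(mask << shift)) | (value << shift), so an unmasked value with bits above the low byte or halfword would corrupt the neighbouring bytes of the 4-byte word the compare-and-swap operates on. Second, the cmpxchg*() wrappers become statement expressions that go through a __typeof__(*(ptr)) temporary, keeping them usable as expressions while avoiding the cast warnings the old single-expression form could trigger, and the 64-bit cds variant gains a "memory" clobber so the compiler cannot cache memory contents across it. A user-space illustration of both points; xchg_byte(), my_cmpxchg() and __my_cmpxchg() are made-up names:

	#include <stdio.h>

	/* Emulate a byte-wide exchange built on top of a 32-bit word, the
	 * way the s390 code builds it out of a 4-byte compare-and-swap. */
	static unsigned char xchg_byte(unsigned int *word, int shift, unsigned long x)
	{
		unsigned int old = *word;

		/* Masking first is the fix: without (x & 0xff), high bits of x
		 * would leak into the other three bytes of *word. */
		*word = (old & ~(0xffu << shift)) | ((unsigned int)(x & 0xff) << shift);
		return old >> shift;
	}

	unsigned long __my_cmpxchg(void *ptr, unsigned long o, unsigned long n,
				   int size);	/* hypothetical backend */

	/* Statement-expression wrapper: usable as an expression, returns the
	 * pointed-to type, and names the result exactly once. */
	#define my_cmpxchg(ptr, o, n)						\
	({									\
		__typeof__(*(ptr)) __ret;					\
		__ret = (__typeof__(*(ptr)))					\
			__my_cmpxchg((ptr), (unsigned long)(o),			\
				     (unsigned long)(n), sizeof(*(ptr)));	\
		__ret;								\
	})

	int main(void)
	{
		unsigned int word = 0x11223344;

		xchg_byte(&word, 0, 0x1abUL);	/* only 0xab may land in byte 0 */
		printf("%08x\n", word);		/* prints 112233ab */
		return 0;
	}
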
index 24ef186a1c4f6f0b7170834986aa01d320e45163..718374de9c7f3f75f658bbaedb54695760ed25eb 100644 (file)
@@ -21,15 +21,15 @@ typedef unsigned long long __nocast cputime64_t;
 
 static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = n >> 1;
        asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
        return rp.subreg.odd;
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        return n / base;
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 #define cputime_one_jiffy              jiffies_to_cputime(1)
@@ -100,7 +100,7 @@ static inline void cputime_to_timespec(const cputime_t cputime,
                                       struct timespec *value)
 {
        unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = __cputime >> 1;
@@ -128,7 +128,7 @@ static inline void cputime_to_timeval(const cputime_t cputime,
                                      struct timeval *value)
 {
        unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = __cputime >> 1;
index ecde9417d669f20ef06d1eddab491aaab2ffb076..debfda33d1f86d88a8b3bce89e6b23962c643489 100644 (file)
@@ -7,7 +7,7 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 
 #define __ctl_load(array, low, high) ({                                \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
@@ -25,7 +25,7 @@
                : "i" (low), "i" (high));                       \
        })
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __ctl_load(array, low, high) ({                                \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
@@ -43,7 +43,7 @@
                : "i" (low), "i" (high));                       \
        })
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __ctl_set_bit(cr, bit) ({      \
        unsigned long __dummy;          \
index 83cf36cde2da2cc9356bc12d6d9ad9689cc9d7e1..7a68084ec2f0aa0443ff671048082562ca63c346 100644 (file)
 #ifndef _S390_CURRENT_H
 #define _S390_CURRENT_H
 
-#ifdef __KERNEL__
 #include <asm/lowcore.h>
 
 struct task_struct;
 
 #define current ((struct task_struct *const)S390_lowcore.current_task)
 
-#endif
-
 #endif /* !(_S390_CURRENT_H) */
index c4ee39f7a4d6a12895f6f3a8bf8b862b83ea9e4d..06151e6a309889a16b46e62efbc26798d8b0e42d 100644 (file)
 /*
  * These are used to set parameters in the core dumps.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define ELF_CLASS      ELFCLASS32
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define ELF_CLASS      ELFCLASS64
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define ELF_DATA       ELFDATA2MSB
 #define ELF_ARCH       EM_S390
 
@@ -181,9 +181,9 @@ extern unsigned long elf_hwcap;
 extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define SET_PERSONALITY(ex) set_personality(PER_LINUX)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define SET_PERSONALITY(ex)                                    \
 do {                                                           \
        if (personality(current->personality) != PER_LINUX32)   \
@@ -194,7 +194,7 @@ do {                                                                \
        else                                                    \
                clear_thread_flag(TIF_31BIT);                   \
 } while (0)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define STACK_RND_MASK 0x7ffUL
 
index 81cf36b691f1dfd42c2ed4f5a48f6bc42a0a7a0e..96bc83ea5c90e0a05f4b959167145b5ef1484e52 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -48,5 +46,4 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_S390_FUTEX_H */
index aae276d00383cc90660f1e1771d0aee11ae36949..aef0dde340d1f54b866874a9f14061b8237ef4dc 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/cio.h>
 #include <asm/uaccess.h>
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
 #else
 #define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
@@ -33,7 +33,7 @@
 static inline int
 idal_is_needed(void *vaddr, unsigned int length)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        return ((__pa(vaddr) + length - 1) >> 31) != 0;
 #else
        return 0;
@@ -78,7 +78,7 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
 static inline int
 set_normalized_cda(struct ccw1 * ccw, void *vaddr)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        unsigned int nridaws;
        unsigned long *idal;
 
@@ -105,7 +105,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
 static inline void
 clear_normalized_cda(struct ccw1 * ccw)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (ccw->flags & CCW_FLAG_IDA) {
                kfree((void *)(unsigned long) ccw->cda);
                ccw->flags &= ~CCW_FLAG_IDA;
@@ -182,7 +182,7 @@ idal_buffer_free(struct idal_buffer *ib)
 static inline int
 __idal_buffer_is_needed(struct idal_buffer *ib)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        return ib->size > (4096ul << ib->page_order) ||
                idal_is_needed(ib->data[0], ib->size);
 #else
index 27216d317991af2bc7d212731855a5bb2b12e37e..f81a0975cbea0efb88db7b8a123072f125f80e10 100644 (file)
@@ -11,8 +11,6 @@
 #ifndef _S390_IO_H
 #define _S390_IO_H
 
-#ifdef __KERNEL__
-
 #include <asm/page.h>
 
 #define IO_SPACE_LIMIT 0xffffffff
@@ -46,6 +44,4 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
  */
 #define xlate_dev_kmem_ptr(p)  p
 
-#endif /* __KERNEL__ */
-
 #endif
index 5289cacd4861773928e5257b670a98525637460c..2b9d41899d21af3201c71916b27806c511f37267 100644 (file)
@@ -17,7 +17,8 @@ enum interruption_class {
        EXTINT_VRT,
        EXTINT_SCP,
        EXTINT_IUC,
-       EXTINT_CPM,
+       EXTINT_CMS,
+       EXTINT_CMC,
        IOINT_CIO,
        IOINT_QAI,
        IOINT_DAS,
index 3f30dac804ea7ee92808bb5a1f51e16482462951..f4f38826eebb3347ed5c5039db329e1d318b2ef1 100644 (file)
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
-#ifdef __KERNEL__
-#include <asm/page.h>
-#endif
 #include <asm/processor.h>
+#include <asm/page.h>
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  * I.e. Maximum page that is mapped directly into kernel memory,
index 94ec3ee07983f8e9b97c6d857b5e7702bbfa36ee..0a88622339ee363d0663d53390a2106e26e51038 100644 (file)
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
 #include <asm-generic/kmap_types.h>
 
 #endif
-#endif /* __KERNEL__ */
index 5d09e405c54d504a8e3a243b54155d91eb8e18db..69bdf72e95ecfd7fd5134640a48b929952a74747 100644 (file)
@@ -49,7 +49,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define LCTL_OPCODE "lctl"
 #else
 #define LCTL_OPCODE "lctlg"
index 1cc1c5af705aadb431dbd2dfbced6f25b5b3fa67..f0b6b26b6e59de846b260deef64d3d0b07e0a188 100644 (file)
@@ -28,7 +28,7 @@ struct mod_arch_specific
        struct mod_arch_syminfo *syminfo;
 };
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define ElfW(x) Elf64_ ## x
 #define ELFW(x) ELF64_ ## x
 #else
index d07518af09ea828e1e20973aca47434ebd09ba02..295f2c4f1c96ab2dbd333b6321475796fa8b820b 100644 (file)
@@ -13,7 +13,6 @@
 
 #define OS_INFO_VMCOREINFO     0
 #define OS_INFO_REIPL_BLOCK    1
-#define OS_INFO_INIT_FN                2
 
 struct os_info_entry {
        u64     addr;
@@ -28,8 +27,8 @@ struct os_info {
        u16     version_minor;
        u64     crashkernel_addr;
        u64     crashkernel_size;
-       struct os_info_entry entry[3];
-       u8      reserved[4004];
+       struct os_info_entry entry[2];
+       u8      reserved[4024];
 } __packed;
 
 void os_info_init(void);
index 0fbd1899c7b039fe6924704ab73871fb13f6b53d..6537e72e0853d01473fe57ab1d83257e7487b2af 100644 (file)
@@ -15,7 +15,7 @@
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
index 78e3041919dedd11556ed359c40c1b2d875a197c..43078c1943948ca9b801dc60f684ef7025eed616 100644 (file)
@@ -48,7 +48,7 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
        clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -64,7 +64,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 #define pgd_populate(mm, pgd, pud)             BUG()
 #define pud_populate(mm, pud, pmd)             BUG()
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -106,7 +106,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index 011358c1b18e0d1145874eaf5929df48e89ed435..b3227415abdaca94d3f3dcc301cb7ce425cd6a9e 100644 (file)
@@ -74,15 +74,15 @@ static inline int is_zero_pfn(unsigned long pfn)
  * table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PMD_SHIFT     20
 # define PUD_SHIFT     20
 # define PGDIR_SHIFT   20
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PMD_SHIFT     20
 # define PUD_SHIFT     31
 # define PGDIR_SHIFT   42
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
@@ -98,13 +98,13 @@ static inline int is_zero_pfn(unsigned long pfn)
  * that leads to 1024 pte per pgd
  */
 #define PTRS_PER_PTE   256
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PUD   1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define PTRS_PER_PMD   2048
 #define PTRS_PER_PUD   2048
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define PTRS_PER_PGD   2048
 
 #define FIRST_USER_ADDRESS  0
@@ -276,7 +276,7 @@ extern struct page *vmemmap;
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 /* Bits in the segment table address-space-control-element */
 #define _ASCE_SPACE_SWITCH     0x80000000UL    /* space switch event       */
@@ -308,7 +308,7 @@ extern struct page *vmemmap;
 #define KVM_UR_BIT     0x00008000UL
 #define KVM_UC_BIT     0x00004000UL
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 /* Bits in the segment/region table address-space-control-element */
 #define _ASCE_ORIGIN           ~0xfffUL/* segment table origin             */
@@ -363,7 +363,7 @@ extern struct page *vmemmap;
 #define KVM_UR_BIT     0x0000800000000000UL
 #define KVM_UC_BIT     0x0000400000000000UL
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 /*
  * A user page table pointer has the space-switch-event bit, the
@@ -424,7 +424,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 /*
  * pgd/pmd/pte query functions
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)    { return 0; }
@@ -434,7 +434,7 @@ static inline int pud_present(pud_t pud) { return 1; }
 static inline int pud_none(pud_t pud)   { return 0; }
 static inline int pud_bad(pud_t pud)    { return 0; }
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline int pgd_present(pgd_t pgd)
 {
@@ -490,7 +490,7 @@ static inline int pud_bad(pud_t pud)
        return (pud_val(pud) & mask) != 0;
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline int pmd_present(pmd_t pmd)
 {
@@ -741,7 +741,7 @@ static inline int pte_young(pte_t pte)
 
 static inline void pgd_clear(pgd_t *pgd)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
 #endif
@@ -749,7 +749,7 @@ static inline void pgd_clear(pgd_t *pgd)
 
 static inline void pud_clear(pud_t *pud)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
 #endif
@@ -921,7 +921,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                /* pto must point to the start of the segment table */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
@@ -1116,7 +1116,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pmd) ({ BUG(); 0UL; })
@@ -1125,7 +1125,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pud_offset(pgd, address) ((pud_t *) pgd)
 #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1147,7 +1147,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
        return pmd + pmd_index(address);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1196,7 +1196,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
  *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
  *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define __SWP_OFFSET_MASK (~0UL >> 12)
 #else
 #define __SWP_OFFSET_MASK (~0UL >> 11)
@@ -1217,11 +1217,11 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PTE_FILE_MAX_BITS     26
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PTE_FILE_MAX_BITS     59
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pte_to_pgoff(__pte) \
        ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
index edf8527ff08d9bdf9e5f0468e24faebfdd372c34..7be104c0f19230e157d569efb9a18002dfa4709d 100644 (file)
@@ -24,7 +24,6 @@ typedef unsigned short        __kernel_old_dev_t;
 
 typedef unsigned long   __kernel_ino_t;
 typedef unsigned short  __kernel_mode_t;
-typedef unsigned short  __kernel_nlink_t;
 typedef unsigned short  __kernel_ipc_pid_t;
 typedef unsigned short  __kernel_uid_t;
 typedef unsigned short  __kernel_gid_t;
@@ -35,7 +34,6 @@ typedef int             __kernel_ptrdiff_t;
 
 typedef unsigned int    __kernel_ino_t;
 typedef unsigned int    __kernel_mode_t;
-typedef unsigned int    __kernel_nlink_t;
 typedef int             __kernel_ipc_pid_t;
 typedef unsigned int    __kernel_uid_t;
 typedef unsigned int    __kernel_gid_t;
@@ -47,7 +45,6 @@ typedef unsigned long   __kernel_sigset_t;      /* at least 32 bits */
 
 #define __kernel_ino_t  __kernel_ino_t
 #define __kernel_mode_t __kernel_mode_t
-#define __kernel_nlink_t __kernel_nlink_t
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #define __kernel_uid_t __kernel_uid_t
 #define __kernel_gid_t __kernel_gid_t
index 6cbf31311673a37bc83de2be5add15bbc42ece5e..20d0585cf905675422ad406d406401eba2bd6f82 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 
-#ifdef __KERNEL__
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -33,39 +32,33 @@ static inline void get_cpu_id(struct cpuid *ptr)
 }
 
 extern void s390_adjust_jiffies(void);
-extern int get_cpu_capability(unsigned int *);
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define TASK_SIZE              (1UL << 31)
 #define TASK_UNMAPPED_BASE     (1UL << 30)
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define TASK_SIZE_OF(tsk)      ((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE     (test_thread_flag(TIF_31BIT) ? \
                                        (1UL << 30) : (1UL << 41))
 #define TASK_SIZE              TASK_SIZE_OF(current)
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define STACK_TOP              (1UL << 31)
 #define STACK_TOP_MAX          (1UL << 31)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define STACK_TOP              (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
 #define STACK_TOP_MAX          (1UL << 42)
-#endif /* __s390x__ */
-
-
-#endif
+#endif /* CONFIG_64BIT */
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
@@ -182,7 +175,7 @@ static inline void psw_set_key(unsigned int key)
  */
 static inline void __load_psw(psw_t psw)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile("lpsw  %0" : : "Q" (psw) : "cc");
 #else
        asm volatile("lpswe %0" : : "Q" (psw) : "cc");
@@ -200,7 +193,7 @@ static inline void __load_psw_mask (unsigned long mask)
 
        psw.mask = mask;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       basr    %0,0\n"
                "0:     ahi     %0,1f-0b\n"
@@ -208,14 +201,14 @@ static inline void __load_psw_mask (unsigned long mask)
                "       lpsw    %1\n"
                "1:"
                : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        asm volatile(
                "       larl    %0,1f\n"
                "       stg     %0,%O1+8(%R1)\n"
                "       lpswe   %1\n"
                "1:"
                : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 /*
@@ -223,7 +216,7 @@ static inline void __load_psw_mask (unsigned long mask)
  */
 static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        if (psw.addr & PSW_ADDR_AMODE)
                /* 31 bit mode */
                return (psw.addr - ilc) | PSW_ADDR_AMODE;
@@ -253,7 +246,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
          * Store status and then load disabled wait psw,
          * the processor is dead afterwards
          */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       stctl   0,0,0(%2)\n"
                "       ni      0(%2),0xef\n"   /* switch off protection */
@@ -272,7 +265,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
                "       lpsw    0(%1)"
                : "=m" (ctl_buf)
                : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        asm volatile(
                "       stctg   0,0,0(%2)\n"
                "       ni      4(%2),0xef\n"   /* switch off protection */
@@ -305,7 +298,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
                "       lpswe   0(%1)"
                : "=m" (ctl_buf)
                : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
        while (1);
 }
 
@@ -338,12 +331,10 @@ extern void (*s390_base_ext_handler_fn)(void);
 
 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
 
-#endif
-
 /*
  * Helper macro for exception table entries
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define EX_TABLE(_fault,_target)                       \
        ".section __ex_table,\"a\"\n"                   \
        "       .align 4\n"                             \
index d0eb4653cebdb0d7bf0eab014cfb142904ad89b7..1ceee10264c3832bce52f2cae57070d549afc35c 100644 (file)
 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
 #endif
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define RWSEM_UNLOCKED_VALUE   0x00000000
 #define RWSEM_ACTIVE_BIAS      0x00000001
 #define RWSEM_ACTIVE_MASK      0x0000ffff
 #define RWSEM_WAITING_BIAS     (-0x00010000)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define RWSEM_UNLOCKED_VALUE   0x0000000000000000L
 #define RWSEM_ACTIVE_BIAS      0x0000000000000001L
 #define RWSEM_ACTIVE_MASK      0x00000000ffffffffL
 #define RWSEM_WAITING_BIAS     (-0x0000000100000000L)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS        (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -65,19 +63,19 @@ static inline void __down_read(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -93,7 +91,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     ltr     %1,%0\n"
                "       jm      1f\n"
@@ -101,7 +99,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                "       cs      %0,%1,%2\n"
                "       jl      0b\n"
                "1:"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     ltgr    %1,%0\n"
                "       jm      1f\n"
@@ -109,7 +107,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                "1:"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -125,19 +123,19 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 
        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -158,19 +156,19 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
        signed long old;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%1\n"
                "0:     ltr     %0,%0\n"
                "       jnz     1f\n"
                "       cs      %0,%3,%1\n"
                "       jl      0b\n"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%1\n"
                "0:     ltgr    %0,%0\n"
                "       jnz     1f\n"
                "       csg     %0,%3,%1\n"
                "       jl      0b\n"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                "1:"
                : "=&d" (old), "=Q" (sem->count)
                : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -186,19 +184,19 @@ static inline void __up_read(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -216,19 +214,19 @@ static inline void __up_write(struct rw_semaphore *sem)
 
        tmp = -RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -246,19 +244,19 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 
        tmp = -RWSEM_WAITING_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -274,19 +272,19 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "d" (delta)
                : "cc", "memory");
@@ -300,24 +298,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "d" (delta)
                : "cc", "memory");
        return new;
 }
 
-#endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
index 7244e1f6412669f4f0be00e6ec9ab9e31005ed9f..40eb2ff88e9e59766cc7e1ba931e65a303e06a16 100644 (file)
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        (*(unsigned long *)  (0x10404))
 #define INITRD_START      (*(unsigned long *)  (0x1040C))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10414))
 #define OLDMEM_BASE      (*(unsigned long *)  (0x1041C))
 #define OLDMEM_SIZE      (*(unsigned long *)  (0x10424))
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        (*(unsigned long *)  (0x10400))
 #define INITRD_START      (*(unsigned long *)  (0x10408))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10410))
 #define OLDMEM_BASE      (*(unsigned long *)  (0x10418))
 #define OLDMEM_SIZE      (*(unsigned long *)  (0x10420))
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
 #define CHUNK_READ_WRITE 0
@@ -89,7 +89,7 @@ extern unsigned int user_mode;
 
 #define MACHINE_HAS_DIAG9C     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define MACHINE_HAS_IEEE       (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
 #define MACHINE_HAS_CSP                (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
 #define MACHINE_HAS_IDTE       (0)
@@ -100,7 +100,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (0)
 #define MACHINE_HAS_SPP                (0)
 #define MACHINE_HAS_TOPOLOGY   (0)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE       (1)
 #define MACHINE_HAS_CSP                (1)
 #define MACHINE_HAS_IDTE       (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
@@ -111,7 +111,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP                (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY   (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define ZFCPDUMP_HSA_SIZE      (32UL<<20)
 #define ZFCPDUMP_HSA_SIZE_MAX  (64UL<<20)
@@ -153,19 +153,19 @@ extern void (*_machine_power_off)(void);
 
 #else /* __ASSEMBLY__ */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        0x10404
 #define INITRD_START      0x1040C
 #define INITRD_SIZE       0x10414
 #define OLDMEM_BASE      0x1041C
 #define OLDMEM_SIZE      0x10424
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        0x10400
 #define INITRD_START      0x10408
 #define INITRD_SIZE       0x10410
 #define OLDMEM_BASE      0x10418
 #define OLDMEM_SIZE      0x10420
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      0x10480
 
 #endif /* __ASSEMBLY__ */
index ca3f8814e3614050d5714843fea3a97c90db8bca..5959bfb3b693ce79def5ccfe2bb548b4dad6378b 100644 (file)
@@ -51,7 +51,7 @@
        wl = __wl;                                      \
 })
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define udiv_qrnnd(q, r, n1, n0, d)                    \
   do { unsigned long __n;                              \
        unsigned int __r, __d;                          \
index cd0241db5a4688b754d6838497cfc06768b4025e..8cc160c9e1cb108c2ce9829dc4c5684a34a3b4ae 100644 (file)
@@ -9,8 +9,6 @@
 #ifndef _S390_STRING_H_
 #define _S390_STRING_H_
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_TYPES_H
 #include <linux/types.h>
 #endif
@@ -152,6 +150,4 @@ size_t strlen(const char *s);
 size_t strnlen(const char * s, size_t n);
 #endif /* !IN_ARCH_STRING_C */
 
-#endif /* __KERNEL__ */
-
 #endif /* __S390_STRING_H_ */
index 003b04edcff6636f1e5c23a04054dbd7f4de749d..4e40b25cd0600e7d76fcdf38789f86fd32275839 100644 (file)
@@ -9,15 +9,13 @@
 #ifndef _ASM_THREAD_INFO_H
 #define _ASM_THREAD_INFO_H
 
-#ifdef __KERNEL__
-
 /*
  * Size of kernel stack for each process
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #ifndef __SMALL_STACK
 #define THREAD_ORDER 2
 #define ASYNC_ORDER  2
@@ -25,7 +23,7 @@
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
 #endif
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
@@ -123,8 +121,6 @@ static inline struct thread_info *current_thread_info(void)
 #define is_32bit_task()                (1)
 #endif
 
-#endif /* __KERNEL__ */
-
 #define PREEMPT_ACTIVE         0x4000000
 
 #endif /* _ASM_THREAD_INFO_H */
index e63069ba39e3b274d46d691f62ae74e5d14d4249..15d647901e5cafc14a355325a7ebe25d53da88da 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef _ASM_S390_TIMER_H
 #define _ASM_S390_TIMER_H
 
-#ifdef __KERNEL__
-
 #include <linux/timer.h>
 
 #define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -50,6 +48,4 @@ extern void vtime_init(void);
 extern void vtime_stop_cpu(void);
 extern void vtime_start_leave(void);
 
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_S390_TIMER_H */
index 775a5eea8f9eb9896e9d38e809dc74d51823d99f..06e5acbc84bd50ef4917eabb5f6fc8cd2d6f6b22 100644 (file)
@@ -106,7 +106,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (tlb->mm->context.asce_limit <= (1UL << 31))
                return;
        if (!tlb->fullmm)
@@ -125,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (tlb->mm->context.asce_limit <= (1UL << 42))
                return;
        if (!tlb->fullmm)
index 1d8648cf2fea81eb7a6fdbe5129fd22478b074c2..9fde315f3a7cd42184a54f2174584258076e6a60 100644 (file)
@@ -27,12 +27,12 @@ static inline void __tlb_flush_global(void)
        register unsigned long reg4 asm("4");
        long dummy;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        if (!MACHINE_HAS_CSP) {
                smp_ptlb_all();
                return;
        }
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
        dummy = 0;
        reg2 = reg3 = 0;
index 05ebbcdbbf6ba7d34791545f3f37355fcb10ef00..6c8c35f8df142b3b8e22dd21d3230a2403dcab38 100644 (file)
@@ -28,7 +28,7 @@ typedef __signed__ long saddr_t;
 
 #ifndef __ASSEMBLY__
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 typedef union {
        unsigned long long pair;
        struct {
@@ -37,7 +37,7 @@ typedef union {
        } subreg;
 } register_pair;
 
-#endif /* ! __s390x__   */
+#endif /* ! CONFIG_64BIT   */
 #endif /* __ASSEMBLY__  */
 #endif /* __KERNEL__    */
 #endif /* _S390_TYPES_H */
index 8f2cada4f7c916d9d88fb87cbb390cf99e4b793e..1f3a79bcd262722e251d575009cec172a9c77509 100644 (file)
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
-#define __access_ok(addr, size)        \
-({                             \
-       __chk_user_ptr(addr);   \
-       1;                      \
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+       return 1;
+}
+
+#define __access_ok(addr, size)                                \
+({                                                     \
+       __chk_user_ptr(addr);                           \
+       __range_ok((unsigned long)(addr), (size));      \
 })
 
 #define access_ok(type, addr, size) __access_ok(addr, size)
@@ -377,7 +382,7 @@ clear_user(void __user *to, unsigned long n)
 }
 
 extern int memcpy_real(void *, void *, size_t);
-extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern void memcpy_absolute(void *, void *, size_t);
 extern int copy_to_user_real(void __user *dest, void *src, size_t count);
 extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
index c4a11cfad3c8a55aa1178b5769f82d973d963c66..a73eb2e1e918351356005b99940629235ef6ee98 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __S390_VDSO_H__
 #define __S390_VDSO_H__
 
-#ifdef __KERNEL__
-
 /* Default link addresses for the vDSOs */
 #define VDSO32_LBASE   0
 #define VDSO64_LBASE   0
@@ -45,7 +43,4 @@ void vdso_free_per_cpu(struct _lowcore *lowcore);
 #endif
 
 #endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
 #endif /* __S390_VDSO_H__ */
index 3aa4d00aaf50ec0af3d4581facf40c1cffeef171..c880ff72db44a0247f57c48450a230f682843ec1 100644 (file)
@@ -88,6 +88,9 @@ ENTRY(diag308_reset)
        stctg   %c0,%c15,0(%r4)
        larl    %r4,.Lfpctl             # Floating point control register
        stfpc   0(%r4)
+       larl    %r4,.Lcontinue_psw      # Save PSW flags
+       epsw    %r2,%r3
+       stm     %r2,%r3,0(%r4)
        larl    %r4,.Lrestart_psw       # Setup restart PSW at absolute 0
        lghi    %r3,0
        lg      %r4,0(%r4)              # Save PSW
@@ -103,11 +106,20 @@ ENTRY(diag308_reset)
        lctlg   %c0,%c15,0(%r4)
        larl    %r4,.Lfpctl             # Restore floating point ctl register
        lfpc    0(%r4)
+       larl    %r4,.Lcontinue_psw      # Restore PSW flags
+       lpswe   0(%r4)
+.Lcontinue:
        br      %r14
 .align 16
 .Lrestart_psw:
        .long   0x00080000,0x80000000 + .Lrestart_part2
 
+       .section .data..nosave,"aw",@progbits
+.align 8
+.Lcontinue_psw:
+       .quad   0,.Lcontinue
+       .previous
+
        .section .bss
 .align 8
 .Lctlregs:
index 377c096ca4a72c658327b543127692c1e1a91f0c..3c0c19830c37f5a0f7896d0a292762b794b30a74 100644 (file)
@@ -32,8 +32,6 @@
 #include "compat_ptrace.h"
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 typedef struct 
 {
        __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
@@ -364,7 +362,6 @@ asmlinkage long sys32_sigreturn(void)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs32(regs, &frame->sregs))
                goto badframe;
@@ -390,7 +387,6 @@ asmlinkage long sys32_rt_sigreturn(void)
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
                goto badframe;
@@ -572,7 +568,7 @@ give_sigsegv:
  * OK, we're invoking a handler
  */    
 
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
        int ret;
@@ -583,8 +579,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        else
                ret = setup_frame32(sig, ka, oldset, regs);
        if (ret)
-               return ret;
-       block_sigmask(ka, sig);
-       return 0;
+               return;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLE_STEP));
 }
 
index d84181f1f5e83f4dc82b6099a495bc763e317375..6684fff1755834f14837868248a31a66644f4575 100644 (file)
@@ -237,7 +237,7 @@ static noinline __init void detect_machine_type(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
 {
        unsigned long addr;
        const struct exception_table_entry *fixup;
index 6cdddac93a2e48ed47f58d74648c5d858c7d8557..f66a229ab0b3fdf52308cb55c0ab627ef9564a7b 100644 (file)
@@ -31,7 +31,7 @@ void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
index e1ac3893e972883e2c17b2787fad0a8857efc8d5..796c976b5fdc1b49a6d82e5a9a7cf19906a9443c 100644 (file)
@@ -85,11 +85,6 @@ startup_kdump_relocated:
        basr    %r13,0
 0:
        mvc     0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
-       mvc     464(16,%r0),.Lpgm_psw-0b(%r13)  # Setup pgm check PSW
-       lhi     %r1,1                           # Start new kernel
-       diag    %r1,%r1,0x308                   # with diag 308
-
-.Lno_diag308:                                  # No diag 308
        sam31                                   # Switch to 31 bit addr mode
        sr      %r1,%r1                         # Erase register r1
        sr      %r2,%r2                         # Erase register r2
@@ -98,8 +93,6 @@ startup_kdump_relocated:
 .align 8
 .Lrestart_psw:
        .long   0x00080000,0x80000000 + startup
-.Lpgm_psw:
-       .quad   0x0000000180000000,0x0000000000000000 + .Lno_diag308
 #else
 .align 2
 .Lep_startup_kdump:
index 8342e65a140daf7bb3fc9f589fbcd1f8d417f322..2f6cfd460cb6ad5a04fd033ea7f515b49ba3e487 100644 (file)
@@ -1528,12 +1528,15 @@ static struct shutdown_action __refdata dump_action = {
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
-       u32 csum;
-
-       csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-       copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
-       copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
-                             sizeof(reipl_block_actual));
+       struct {
+               void    *addr;
+               __u32   csum;
+       } __packed ipib;
+
+       ipib.csum = csum_partial(reipl_block_actual,
+                                reipl_block_actual->hdr.len, 0);
+       ipib.addr = reipl_block_actual;
+       memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
        dump_run(trigger);
 }
 
@@ -1750,6 +1753,7 @@ static struct kobj_attribute on_restart_attr =
 
 static void __do_restart(void *ignore)
 {
+       __arch_local_irq_stosm(0x04); /* enable DAT */
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
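
The dump_reipl_run() hunk above replaces two copy_to_absolute_zero() calls with a single memcpy_absolute() of a __packed pair. A minimal userspace sketch of why __packed matters here, assuming (as the hunk implies) that the lowcore ipib pointer and its 32-bit checksum sit back to back, so one copy of exactly sizeof(ipib) bytes covers both without spilling into the following field:

/* Sketch only: compares the padded and packed layouts of the {addr, csum}
 * pair used in dump_reipl_run(). Names are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct ipib_plain {             /* naturally aligned: tail padding likely */
	void     *addr;
	uint32_t  csum;
};

struct ipib_packed {            /* mirrors the __packed struct in the hunk */
	void     *addr;
	uint32_t  csum;
} __attribute__((packed));

int main(void)
{
	printf("plain : sizeof=%zu\n", sizeof(struct ipib_plain));
	printf("packed: sizeof=%zu\n", sizeof(struct ipib_packed));
	/* On LP64 the plain struct is usually padded to 16 bytes; the packed
	 * one is 12, so memcpy_absolute(&lowcore.ipib, &ipib, sizeof(ipib))
	 * copies exactly the pointer plus checksum and nothing after them. */
	return 0;
}
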
index 8a22c27219dd0748f380a0aef0d63128b01ec21a..b4f4a7133fa10e3456b82f7f27fc8fbf70d942ed 100644 (file)
@@ -42,7 +42,8 @@ static const struct irq_class intrclass_names[] = {
        {.name = "VRT", .desc = "[EXT] Virtio" },
        {.name = "SCP", .desc = "[EXT] Service Call" },
        {.name = "IUC", .desc = "[EXT] IUCV" },
-       {.name = "CPM", .desc = "[EXT] CPU Measurement" },
+       {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
+       {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
        {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
        {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
        {.name = "DAS", .desc = "[I/O] DASD" },
index bdad47d544783d89fa015ee6db34d52a2a555695..cdacf8f91b2d11b7cb3acd683f7a8db830c6fb05 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/diag.h>
 #include <asm/asm-offsets.h>
+#include <asm/os_info.h>
 
 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
@@ -79,8 +80,8 @@ static void __do_machine_kdump(void *image)
 #ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
 
-       __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        setup_regs();
+       __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        start_kdump(1);
 #endif
 }
@@ -114,8 +115,13 @@ static void crash_map_pages(int enable)
               size % KEXEC_CRASH_MEM_ALIGN);
        if (enable)
                vmem_add_mapping(crashk_res.start, size);
-       else
+       else {
                vmem_remove_mapping(crashk_res.start, size);
+               if (size)
+                       os_info_crashkernel_add(crashk_res.start, size);
+               else
+                       os_info_crashkernel_add(0, 0);
+       }
 }
 
 /*
@@ -208,6 +214,7 @@ static void __machine_kexec(void *data)
 {
        struct kimage *image = data;
 
+       __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
index e8d6c214d498a0aaf5037f7d64c5fd7e6306fbc9..95fa5ac6c4cedbf6d287ca25708b906cb1f35c4c 100644 (file)
@@ -60,7 +60,7 @@ void __init os_info_init(void)
        os_info.version_minor = OS_INFO_VERSION_MINOR;
        os_info.magic = OS_INFO_MAGIC;
        os_info.csum = os_info_csum(&os_info);
-       copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+       memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
@@ -138,7 +138,6 @@ static void os_info_old_init(void)
                goto fail_free;
        os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
        os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
-       os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
        pr_info("crashkernel: addr=0x%lx size=%lu\n",
                (unsigned long) os_info_old->crashkernel_addr,
                (unsigned long) os_info_old->crashkernel_size);
index cb019f429e88ba22745a14bf38c714743c2383ad..9871b1971ed7602a7efef88fc62d019dde3b98c4 100644 (file)
@@ -225,7 +225,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
        if (!(alert & CPU_MF_INT_CF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
        cpuhw = &__get_cpu_var(cpu_hw_events);
 
        /* Measurement alerts are shared and might happen when the PMU
index 06264ae8ccd9e05fd54f166d4d2bdaa37df18441..489d1d8d96b068f63b61886ee3c55d50f52b3913 100644 (file)
@@ -428,10 +428,12 @@ static void __init setup_lowcore(void)
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
        lc->restart_source = -1UL;
-       memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
-              4*sizeof(unsigned long));
-       copy_to_absolute_zero(&S390_lowcore.restart_psw,
-                             &lc->restart_psw, sizeof(psw_t));
+
+       /* Setup absolute zero lowcore */
+       memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
+                       4 * sizeof(unsigned long));
+       memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
+                       sizeof(lc->restart_psw));
 
        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
@@ -598,7 +600,7 @@ static void __init setup_vmcoreinfo(void)
 #ifdef CONFIG_KEXEC
        unsigned long ptr = paddr_vmcoreinfo_note();
 
-       copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+       memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
 #endif
 }
 
index f626232e216c0ca100f14c1cbeed52c8696c578f..ac565b44aabbf76c23da00540f8f2884e06f07a6 100644 (file)
@@ -33,9 +33,6 @@
 #include <asm/switch_to.h>
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-
 typedef struct 
 {
        __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
@@ -169,7 +166,6 @@ SYSCALL_DEFINE0(sigreturn)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs(regs, &frame->sregs))
                goto badframe;
@@ -189,7 +185,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs(regs, &frame->uc.uc_mcontext))
                goto badframe;
@@ -367,7 +362,7 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
                         siginfo_t *info, sigset_t *oldset,
                         struct pt_regs *regs)
 {
@@ -379,9 +374,9 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
        if (ret)
-               return ret;
-       block_sigmask(ka, sig);
-       return 0;
+               return;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLE_STEP));
 }
 
 /*
@@ -398,12 +393,7 @@ void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
+       sigset_t *oldset = sigmask_to_save();
 
        /*
         * Get signal to deliver. When running under ptrace, at this point
@@ -441,24 +431,10 @@ void do_signal(struct pt_regs *regs)
                /* No longer in a system call */
                clear_thread_flag(TIF_SYSCALL);
 
-               if ((is_compat_task() ?
-                    handle_signal32(signr, &ka, &info, oldset, regs) :
-                    handle_signal(signr, &ka, &info, oldset, regs)) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       /*
-                        * Let tracing know that we've done the handler setup.
-                        */
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                        test_thread_flag(TIF_SINGLE_STEP));
-               }
+               if (is_compat_task())
+                       handle_signal32(signr, &ka, &info, oldset, regs);
+               else
+                       handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }
 
@@ -484,16 +460,11 @@ void do_signal(struct pt_regs *regs)
        /*
         * If there's no signal to deliver, we just put the saved sigmask back.
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
-       if (current->replacement_session_keyring)
-               key_replace_session_keyring();
 }
index 647ba9425893de446e5237d84234c612ab5fedc9..15cca26ccb6c4ff1cbde51c60259a7ec95471718 100644 (file)
@@ -297,26 +297,27 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
                          void *data, unsigned long stack)
 {
-       struct _lowcore *lc = pcpu->lowcore;
-       unsigned short this_cpu;
+       struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+       struct {
+               unsigned long   stack;
+               void            *func;
+               void            *data;
+               unsigned long   source;
+       } restart = { stack, func, data, stap() };
 
        __load_psw_mask(psw_kernel_bits);
-       this_cpu = stap();
-       if (pcpu->address == this_cpu)
+       if (pcpu->address == restart.source)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, sigp_stop, 0);
        /* Restart func on the target cpu and stop the current cpu. */
-       lc->restart_stack = stack;
-       lc->restart_fn = (unsigned long) func;
-       lc->restart_data = (unsigned long) data;
-       lc->restart_source = (unsigned long) this_cpu;
+       memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
        asm volatile(
                "0:     sigp    0,%0,6  # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,5  # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
-               : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
+               : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
        for (;;) ;
 }
 
@@ -800,17 +801,6 @@ void __noreturn cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void smp_call_os_info_init_fn(void)
-{
-       int (*init_fn)(void);
-       unsigned long size;
-
-       init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
-       if (!init_fn)
-               return;
-       init_fn();
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        /* request the 0x1201 emergency signal external interrupt */
@@ -819,7 +809,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        /* request the 0x1202 external call external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
-       smp_call_os_info_init_fn();
        smp_detect_cpus();
 }
 
@@ -943,19 +932,6 @@ static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
 };
 
-static ssize_t show_capability(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       unsigned int capability;
-       int rc;
-
-       rc = get_cpu_capability(&capability);
-       if (rc)
-               return rc;
-       return sprintf(buf, "%u\n", capability);
-}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
-
 static ssize_t show_idle_count(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -993,7 +969,6 @@ static ssize_t show_idle_time(struct device *dev,
 static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
 static struct attribute *cpu_online_attrs[] = {
-       &dev_attr_capability.attr,
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
index 2a94b774695c069241ed413f50a0f9b8b183c656..fa0eb238dac7d8940321c239d970d1245078d219 100644 (file)
@@ -392,27 +392,6 @@ static __init int create_proc_service_level(void)
 }
 subsys_initcall(create_proc_service_level);
 
-/*
- * Bogomips calculation based on cpu capability.
- */
-int get_cpu_capability(unsigned int *capability)
-{
-       struct sysinfo_1_2_2 *info;
-       int rc;
-
-       info = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!info)
-               return -ENOMEM;
-       rc = stsi(info, 1, 2, 2);
-       if (rc == -ENOSYS)
-               goto out;
-       rc = 0;
-       *capability = info->capability;
-out:
-       free_page((unsigned long) info);
-       return rc;
-}
-
 /*
  * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
  */
index 60455f104ea36ee9d3a22fd7100031c21f94866e..58a75a8ae90ce7beae1a4553801c47dd544cd1ee 100644 (file)
@@ -14,7 +14,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI    "ahi"
 #define ALR    "alr"
 #define CLR    "clr"
index bb1a7eed42ce4cbef8350dca7a8eb85269fb1966..57e94298539b51326ff1d9a4a10a9ef36c405326 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI    "ahi"
 #define ALR    "alr"
 #define CLR    "clr"
index 795a0a9bb2eba72875aa3a300f98929daa1c6c12..921fa541dc0431050dfc6b429b4836b94972d6a0 100644 (file)
@@ -101,19 +101,27 @@ int memcpy_real(void *dest, void *src, size_t count)
 }
 
 /*
- * Copy memory to absolute zero
+ * Copy memory in absolute mode (kernel to kernel)
  */
-void copy_to_absolute_zero(void *dest, void *src, size_t count)
+void memcpy_absolute(void *dest, void *src, size_t count)
 {
-       unsigned long cr0;
+       unsigned long cr0, flags, prefix;
 
-       BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
-       preempt_disable();
+       flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
-       memcpy_real(dest + store_prefix(), src, count);
+       prefix = store_prefix();
+       if (prefix) {
+               local_mcck_disable();
+               set_prefix(0);
+               memcpy(dest, src, count);
+               set_prefix(prefix);
+               local_mcck_enable();
+       } else {
+               memcpy(dest, src, count);
+       }
        __ctl_load(cr0, 0, 0);
-       preempt_enable();
+       arch_local_irq_restore(flags);
 }
 
 /*
@@ -187,20 +195,6 @@ static int is_swapped(unsigned long addr)
        return 0;
 }
 
-/*
- * Return swapped prefix or zero page address
- */
-static unsigned long get_swapped(unsigned long addr)
-{
-       unsigned long prefix = store_prefix();
-
-       if (addr < sizeof(struct _lowcore))
-               return addr + prefix;
-       if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
-               return addr - prefix;
-       return addr;
-}
-
 /*
  * Convert a physical pointer for /dev/mem access
  *
@@ -218,7 +212,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
-                       memcpy_real(bounce, (void *) get_swapped(addr), size);
+                       memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
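
memcpy_absolute() above no longer adds store_prefix() to the destination; it temporarily sets the prefix register to 0 so that real and absolute addresses coincide for the lowcore range. A toy model of that equivalence, based on the address swap encoded in the get_swapped() helper removed above; the sizes and prefix value are made up for illustration:

/* Toy model of s390 prefixing: real pages 0..8K and prefix..prefix+8K are
 * swapped by the hardware. Illustrative only. */
#include <stdio.h>

#define LC_SIZE 8192UL                 /* stand-in for sizeof(struct _lowcore) */

static unsigned long prefix = 4 * LC_SIZE;   /* pretend prefix register */

static unsigned long real_to_absolute(unsigned long addr)
{
	if (addr < LC_SIZE)
		return addr + prefix;          /* low page goes to the prefix area */
	if (addr >= prefix && addr < prefix + LC_SIZE)
		return addr - prefix;          /* prefix area goes to absolute zero */
	return addr;
}

int main(void)
{
	unsigned long dest = 0x100;            /* an absolute-zero lowcore offset */

	/* Old copy_to_absolute_zero(): add store_prefix() by hand. */
	printf("old scheme writes absolute 0x%lx\n",
	       real_to_absolute(dest + prefix));
	/* New memcpy_absolute(): set prefix to 0, then real == absolute. */
	prefix = 0;
	printf("new scheme writes absolute 0x%lx\n", real_to_absolute(dest));
	return 0;                              /* both print 0x100 */
}
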
index 4799383e2df9551c45ad69f08f57455bb9771dc0..71ae20df674e53f051834f20319f3c48fae1358b 100644 (file)
@@ -109,7 +109,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                pm_dir = pmd_offset(pu_dir, address);
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
                if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
                    (address + HPAGE_SIZE <= start + size) &&
                    (address >= HPAGE_SIZE)) {
index c6646de07bf455acb80c3c94674cdc61c535677c..a4a89fa980d6c4e0e8e44a00e8519d9ead2ac2b1 100644 (file)
@@ -235,7 +235,7 @@ static void hws_ext_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_SF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
        atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
        if (hws_wq)
index d4a49011c48a58d4311b182f6015efeb67a55e6d..e382c52ca0d90b455d9bee68f7587f4278d36bc8 100644 (file)
@@ -34,8 +34,6 @@
 #include <asm/syscalls.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
        u32 rs_ass[4];          /* argument save space */
        u32 rs_code[2];         /* signal trampoline */
@@ -162,7 +160,6 @@ score_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
@@ -241,11 +238,9 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-       struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+       struct k_sigaction *ka, struct pt_regs *regs)
 {
-       int ret;
-
        if (regs->is_syscall) {
                switch (regs->regs[4]) {
                case ERESTART_RESTARTBLOCK:
@@ -269,18 +264,15 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        /*
         * Set up the stack frame
         */
-       ret = setup_rt_frame(ka, regs, sig, oldset, info);
-
-       if (ret == 0)
-               block_sigmask(ka, sig);
+       if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0)
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -292,25 +284,10 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
@@ -337,10 +314,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -356,7 +330,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 99bcd0ee838d6d1fe743dfd26ee31606fb05d23d..31d9db7913e4352a10680f76f2aaa8a5e800eef5 100644 (file)
@@ -32,6 +32,8 @@ config SUPERH
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_CLOCKEVENTS
        select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
        help
          The SuperH is a RISC processor targeted for use in embedded systems
          and consumer electronics; it was also used in the Sega Dreamcast
index 46edf070da1c377574d8ce17e52a74416f9c896d..aed701c7b11bb0208ffc3cf50efe2fb0ba0673f6 100644 (file)
@@ -9,6 +9,12 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
+ifneq ($(SUBARCH),$(ARCH))
+  ifeq ($(CROSS_COMPILE),)
+    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
+  endif
+endif
+
 isa-y                                  := any
 isa-$(CONFIG_SH_DSP)                   := sh
 isa-$(CONFIG_CPU_SH2)                  := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux           += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
 KBUILD_DEFCONFIG       := cayman_defconfig
 endif
 
-ifneq ($(SUBARCH),$(ARCH))
-  ifeq ($(CROSS_COMPILE),)
-    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
-  endif
-endif
-
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 ld-bfd                 := elf32-$(UTS_MACHINE)-linux
-LDFLAGS_vmlinux                += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
+LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
 LDFLAGS                        += -EL
 else
 ld-bfd                 := elf32-$(UTS_MACHINE)big-linux
-LDFLAGS_vmlinux                += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
+LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
 LDFLAGS                        += -EB
 endif
 
index 158c9176e42adc79fadeb78300ec14051a2a3801..43a179ce9afcb0bdcabc8f116901e27b7d4c02eb 100644 (file)
@@ -201,8 +201,8 @@ static struct resource kfr2r09_usb0_gadget_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = evtirq(0xa20),
-               .end    = evtirq(0xa20),
+               .start  = evt2irq(0xa20),
+               .end    = evt2irq(0xa20),
                .flags  = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
        },
 };
index 34cd0c5ff2e1bd104e6de9ff407ff478d2bb1aa7..a8a1ca741c8599b975420dd1a796a573a33e40bc 100644 (file)
@@ -188,7 +188,6 @@ static struct platform_nand_data migor_nand_flash_data = {
                .partitions = migor_nand_flash_partitions,
                .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions),
                .chip_delay = 20,
-               .part_probe_types = (const char *[]) { "cmdlinepart", NULL },
        },
        .ctrl = {
                .dev_ready = migor_nand_flash_ready,
index c045142f73385978cd8e740ebb235ca40c40ad02..9e702f2f80452a222e08f490f9095c948b1143e4 100644 (file)
@@ -239,7 +239,7 @@ static int __init pcie_clk_init(struct sh7786_pcie_port *port)
        clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
        clk->enable_bit = BITS_CKE;
 
-       ret = sh_clk_mstp32_register(clk, 1);
+       ret = sh_clk_mstp_register(clk, 1);
        if (unlikely(ret < 0))
                goto err_phy;
 
index 7beb42322f60059368590c80439617a6be614460..7b673ddcd5551cc887cbd393c331056c6c1cc20b 100644 (file)
@@ -1,5 +1,39 @@
 include include/asm-generic/Kbuild.asm
 
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
+generic-y += xor.h
+
 header-y += cachectl.h
 header-y += cpu-features.h
 header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644 (file)
index 6dc0bb0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644 (file)
index 6ca395d..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SH_CPUTIME_H
-#define __SH_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644 (file)
index 4c51401..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644 (file)
index 9670e12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644 (file)
index 6cd978c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644 (file)
index 51cf6f9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_ERRNO_H
-#define __ASM_SH_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644 (file)
index 46ab12d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
index e136d28d1d2ee5ad97f75866f9e9f3e4d118eba9..4d48f1436a63b72a34f8201ad8cb88416bc1a37d 100644 (file)
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
        return -1;
 }
 
-#define outb(x, y)     BUG()
-#define outw(x, y)     BUG()
-#define outl(x, y)     BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+       BUG();
+}
 
 #define inb_p(addr)    inb(addr)
 #define inw_p(addr)    inw(addr)
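
Turning the out*() stubs from function-like macros into static inline functions gives them real prototypes: argument types are checked and the symbols can be taken by address. A hedged userspace sketch, with BUG() modeled as abort() and names chosen only for illustration:

/* Sketch of macro stub vs. static inline stub. */
#include <stdlib.h>

#define outb_macro(x, y)	abort()        /* old style: arguments ignored */

static inline void outb_inline(unsigned char x, unsigned long port)
{
	(void)x; (void)port;
	abort();                               /* new style: typed, addressable */
}

int main(void)
{
	/* The macro accepts nonsense arguments without complaint:
	 *   outb_macro("not a byte", "not a port");   -- compiles silently
	 * The inline version is a real function, so its arguments are
	 * type-checked and it can be assigned to a function pointer: */
	void (*out_fn)(unsigned char, unsigned long) = outb_inline;
	(void)out_fn;
	return 0;
}
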
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644 (file)
index b279fe0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644 (file)
index 84c7e51..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644 (file)
index 9ed9b9c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_LOCAL_H
-#define __ASM_SH_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_SH_LOCAL_H */
-
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644 (file)
index 8eebf89..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644 (file)
index 809134c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644 (file)
index 965d454..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644 (file)
index cf252af..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644 (file)
index 4db4b39..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_SH_PERCPU
-#define __ARCH_SH_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644 (file)
index c98509d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
index abda58467ece9e86ff1249029bcc7143281b2f04..ba0bdc423b072fa62f74fbc64e1bc5b683f2af7d 100644 (file)
@@ -3,8 +3,6 @@
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short __kernel_uid_t;
index fcda07b4a616be8196f105ce5d2faee8682c9af1..244f7e950e176b0cbdc907f70b4fdf88572b08f0 100644 (file)
@@ -3,8 +3,6 @@
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short __kernel_uid_t;
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644 (file)
index 9c2499a..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_RESOURCE_H
-#define __ASM_SH_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644 (file)
index 98dfc35..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SCATTERLIST_H
-#define __ASM_SH_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644 (file)
index 7673b83..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644 (file)
index a0cb0ca..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644 (file)
index 83c05fc..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644 (file)
index 813040e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SIGINFO_H
-#define __ASM_SH_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644 (file)
index dd248c2..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644 (file)
index 6b71384..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644 (file)
index 9202a02..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_STATFS_H
-#define __ASM_SH_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644 (file)
index 3935b10..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644 (file)
index 280d78a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
index 0c04ffc4f12c41d344eded872aea06aaf7232e2b..bc13b57cdc834210b95aaa70ea603298ed55f468 100644 (file)
@@ -169,7 +169,7 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
 }
 
 #define TI_FLAG_FAULT_CODE_SHIFT       24
@@ -189,6 +189,23 @@ static inline unsigned int get_thread_fault_code(void)
        struct thread_info *ti = current_thread_info();
        return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
+
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
+}
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
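
set_restore_sigmask() now only warns if TIF_SIGPENDING is not already set, and the new helpers manipulate the thread's non-atomic status word rather than its flags. A small userspace model of that status-word pattern; the struct and flag value below are stand-ins, not the kernel's:

/* Model of the TS_RESTORE_SIGMASK status-word helpers added above. */
#include <stdbool.h>
#include <stdio.h>

#define TS_RESTORE_SIGMASK	0x0008

struct fake_thread_info {
	unsigned int status;    /* only ever touched by the owning thread */
};

static struct fake_thread_info ti;

static void set_restore_sigmask(void)   { ti.status |= TS_RESTORE_SIGMASK; }
static void clear_restore_sigmask(void) { ti.status &= ~TS_RESTORE_SIGMASK; }
static bool test_restore_sigmask(void)  { return ti.status & TS_RESTORE_SIGMASK; }

static bool test_and_clear_restore_sigmask(void)
{
	if (!(ti.status & TS_RESTORE_SIGMASK))
		return false;
	ti.status &= ~TS_RESTORE_SIGMASK;
	return true;
}

int main(void)
{
	set_restore_sigmask();
	printf("set: %d\n", test_restore_sigmask());                       /* 1 */
	printf("test_and_clear: %d\n", test_and_clear_restore_sigmask());  /* 1 */
	printf("after clear: %d\n", test_restore_sigmask());               /* 0 */
	clear_restore_sigmask();                                    /* idempotent */
	return 0;
}
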
index 050f221fa898219b40e2faca0bbb9cebae93dd7e..8698a80ed00c1fcbebed2ec993887f104179d384 100644 (file)
@@ -25,6 +25,8 @@
        (__chk_user_ptr(addr),          \
         __access_ok((unsigned long __force)(addr), (size)))
 
+#define user_addr_max()        (current_thread_info()->addr_limit.seg)
+
 /*
  * Uh, these should become the main single-value transfer routines ...
  * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
 # include "uaccess_64.h"
 #endif
 
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /* Generic arbitrary sized copy.  */
 /* Return the number of bytes NOT copied */
 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
        __cl_size;                                                      \
 })
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count)                              \
-({                                                                     \
-       unsigned long __sfu_src = (unsigned long)(src);                 \
-       int __sfu_count = (int)(count);                                 \
-       long __sfu_res = -EFAULT;                                       \
-                                                                       \
-       if (__access_ok(__sfu_src, __sfu_count))                        \
-               __sfu_res = __strncpy_from_user((unsigned long)(dest),  \
-                               __sfu_src, __sfu_count);                \
-                                                                       \
-       __sfu_res;                                                      \
-})
-
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_size;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static inline long strnlen_user(const char __user *s, long n)
-{
-       if (!__addr_ok(s))
-               return 0;
-       else
-               return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str)       strnlen_user(str, ~0UL >> 1)
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
index ae0d24f6653f8ee1979d4d369b5e8fac9480beb1..c0de7ee35ab7aa5ba80c393f4a2588102d01e2a1 100644 (file)
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
 
 extern void __put_user_unknown(void);
 
-static inline int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
-       __kernel_size_t res;
-       unsigned long __dummy, _d, _s, _c;
-
-       __asm__ __volatile__(
-               "9:\n"
-               "mov.b  @%2+, %1\n\t"
-               "cmp/eq #0, %1\n\t"
-               "bt/s   2f\n"
-               "1:\n"
-               "mov.b  %1, @%3\n\t"
-               "dt     %4\n\t"
-               "bf/s   9b\n\t"
-               " add   #1, %3\n\t"
-               "2:\n\t"
-               "sub    %4, %0\n"
-               "3:\n"
-               ".section .fixup,\"ax\"\n"
-               "4:\n\t"
-               "mov.l  5f, %1\n\t"
-               "jmp    @%1\n\t"
-               " mov   %9, %0\n\t"
-               ".balign 4\n"
-               "5:     .long 3b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .balign 4\n"
-               "       .long 9b,4b\n"
-               ".previous"
-               : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
-               : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
-                 "i" (-EFAULT)
-               : "memory", "t");
-
-       return res;
-}
-
-/*
- * Return the size of a string (including the ending 0 even when we have
- * exceeded the maximum string length).
- */
-static inline long __strnlen_user(const char __user *__s, long __n)
-{
-       unsigned long res;
-       unsigned long __dummy;
-
-       __asm__ __volatile__(
-               "1:\t"
-               "mov.b  @(%0,%3), %1\n\t"
-               "cmp/eq %4, %0\n\t"
-               "bt/s   2f\n\t"
-               " add   #1, %0\n\t"
-               "tst    %1, %1\n\t"
-               "bf     1b\n\t"
-               "2:\n"
-               ".section .fixup,\"ax\"\n"
-               "3:\n\t"
-               "mov.l  4f, %1\n\t"
-               "jmp    @%1\n\t"
-               " mov   #0, %0\n"
-               ".balign 4\n"
-               "4:     .long 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .balign 4\n"
-               "       .long 1b,3b\n"
-               ".previous"
-               : "=z" (res), "=&r" (__dummy)
-               : "0" (0), "r" (__s), "r" (__n)
-               : "t");
-       return res;
-}
-
 #endif /* __ASM_SH_UACCESS_32_H */
index 56fd20b8cdcc16db8fbe6da757da62dec0d1d4d9..2e07e0f40c6af3038ca6b69fecfefefe528dc2b8 100644 (file)
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
 extern long __put_user_asm_q(void *, long);
 extern void __put_user_unknown(void);
 
-extern long __strnlen_user(const char *__s, long __n);
-extern int __strncpy_from_user(unsigned long __dest,
-              unsigned long __user __src, int __count);
-
 #endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644 (file)
index 9bc07b9..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644 (file)
index 0000000..6e38953
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_WORD_AT_A_TIME_H
+#define __ASM_SH_WORD_AT_A_TIME_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+# include <asm-generic/word-at-a-time.h>
+#else
+/*
+ * Little-endian version cribbed from x86.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+#endif
+
+#endif
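
This new header is what allows SH to select GENERIC_STRNCPY_FROM_USER and GENERIC_STRNLEN_USER in the Kconfig hunk above and drop its hand-written __strncpy_from_user/__strnlen_user helpers elsewhere in this merge: the generic string routines scan one word at a time and use these primitives to locate the first zero byte. A standalone check of the little-endian path, assuming a 32-bit word as on SH; uint32_t stands in for unsigned long, REPEAT_BYTE() is defined locally, and the constants-struct plumbing is dropped for brevity:

/* Run on a little-endian host; mirrors the non-big-endian path above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define REPEAT_BYTE(x)	((uint32_t)0x01010101u * (uint8_t)(x))

static const uint32_t one_bits  = REPEAT_BYTE(0x01);
static const uint32_t high_bits = REPEAT_BYTE(0x80);

static uint32_t has_zero(uint32_t a)            /* nonzero if some byte is 0 */
{
	return (a - one_bits) & ~a & high_bits;
}

static uint32_t create_zero_mask(uint32_t bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static long find_zero(uint32_t mask)            /* index of first zero byte */
{
	/* (000000 0000ff 00ffff ffffff) -> (1 1 2 3), then fix the 0 case */
	long a = (0x0ff0001 + mask) >> 23;
	return a & mask;
}

int main(void)
{
	const char buf[] = { 'a', 'b', '\0', 'x' };  /* NUL at byte index 2 */
	uint32_t w, bits;

	memcpy(&w, buf, sizeof(w));                  /* one word-sized load */
	bits = has_zero(w);
	if (bits)
		printf("first zero byte at index %ld\n",
		       find_zero(create_zero_mask(bits)));   /* prints 2 */
	return 0;
}
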
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644 (file)
index c82eb12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644 (file)
index 1192e1c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * SH-2A UBC definitions
- *
- * Copyright (C) 2008 Kieran Bingham
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_CPU_SH2A_UBC_H
-#define __ASM_CPU_SH2A_UBC_H
-
-#define UBC_BARA                0xfffc0400
-#define UBC_BAMRA               0xfffc0404
-#define UBC_BBRA                0xfffc04a0     /* 16 bit access */
-#define UBC_BDRA                0xfffc0408
-#define UBC_BDMRA               0xfffc040c
-
-#define UBC_BARB                0xfffc0410
-#define UBC_BAMRB               0xfffc0414
-#define UBC_BBRB                0xfffc04b0     /* 16 bit access */
-#define UBC_BDRB                0xfffc0418
-#define UBC_BDMRB               0xfffc041c
-
-#define UBC_BRCR                0xfffc04c0
-
-#endif /* __ASM_CPU_SH2A_UBC_H */
index 8832c526cdf92d68cb8b59a48d2a05d8942ab181..c4a0336660dd102d6542b3f3a5721214439dce08 100644 (file)
@@ -2,7 +2,7 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
 
 static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
index ea01a72f1b94f16892734df4a336e4b1f756f7c1..53638e231cd02f91bcb7678a9e1990de6338b8fd 100644 (file)
@@ -283,7 +283,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 7ac07b4f75de3c01982c5a4c2df50bb67cae8a86..22e485d1990b598c698c1f34344589f01a2b7683 100644 (file)
@@ -276,7 +276,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 8e1f97010c0d7ac9c792c23a5b09197508caeed5..c4cb740e4d10180d3034526d18cb0457b81ca301 100644 (file)
@@ -261,7 +261,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 35f75cf0c7e57b5dfe02f1fb7dc93e1156634016..37c41c7747a3b73a2a70400a79dab3e8a07a4b7c 100644 (file)
@@ -311,7 +311,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 2a87901673febed3df329d47d8bf21acdabb4530..c87e78f73234f453a7fd758dd14b053e844c2408 100644 (file)
@@ -375,7 +375,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 1697642c1f738c4887e4ed2577dd49a9b2b8621e..deb683abacf0f6055dcbb3e0ef65b47071e933de 100644 (file)
@@ -260,7 +260,7 @@ int __init arch_clk_init(void)
                        &div4_table);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 04ab5aeaf9206527d0eae807a317ecd342a233fd..e84a43229b9c5af1ec9c7935c8cf8150c698cdac 100644 (file)
@@ -148,7 +148,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index ab1c58f2d101172b6f769d48278795d6e26adbda..1c83788db76a1f0c0055a1b8dd088073380cc579 100644 (file)
@@ -175,7 +175,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 491709483e109bc75213963b508b6608c55bc9ad..8bba6f15902350d4b7429e960e0a49627b60ef59 100644 (file)
@@ -194,7 +194,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 0f11b392bf466ef2334c221b02cd64de37134dac..a9422dab0ce747115bca41176e47d63f63d78ec4 100644 (file)
@@ -149,7 +149,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index ff1f0e6e9becd18f111d3a3d83730fd4860dfe02..b7cf6a547f117c5eeae4b69f6b1ccbcdb41b37c8 100644 (file)
@@ -1568,86 +1568,6 @@ ___clear_user_exit:
 
 #endif /* CONFIG_MMU */
 
-/*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- *                        int __count)
- *
- * Inputs:
- * (r2)  target address
- * (r3)  source address
- * (r4)  maximum size in bytes
- *
- * Outputs:
- * (*r2) copied data
- * (r2)  -EFAULT (in case of faulting)
- *       copied data (otherwise)
- */
-       .global __strncpy_from_user
-__strncpy_from_user:
-       pta     ___strncpy_from_user1, tr0
-       pta     ___strncpy_from_user_done, tr1
-       or      r4, ZERO, r5            /* r5 = original count */
-       beq/u   r4, r63, tr1            /* early exit if r4==0 */
-       movi    -(EFAULT), r6           /* r6 = reply, no real fixup */
-       or      ZERO, ZERO, r7          /* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
-       ld.b    r3, 0, r7               /* Fault address: only in reading */
-       st.b    r2, 0, r7
-       addi    r2, 1, r2
-       addi    r3, 1, r3
-       beq/u   ZERO, r7, tr1
-       addi    r4, -1, r4              /* return real number of copied bytes */
-       bne/l   ZERO, r4, tr0
-
-___strncpy_from_user_done:
-       sub     r5, r4, r6              /* If done, return copied */
-
-___strncpy_from_user_exit:
-       or      r6, ZERO, r2
-       ptabs   LINK, tr0
-       blink   tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2)  source address
- * (r3)  source size in bytes
- *
- * Outputs:
- * (r2)  -EFAULT (in case of faulting)
- *       string length (otherwise)
- */
-       .global __strnlen_user
-__strnlen_user:
-       pta     ___strnlen_user_set_reply, tr0
-       pta     ___strnlen_user1, tr1
-       or      ZERO, ZERO, r5          /* r5 = counter */
-       movi    -(EFAULT), r6           /* r6 = reply, no real fixup */
-       or      ZERO, ZERO, r7          /* r7 = data, clear top byte of data */
-       beq     r3, ZERO, tr0
-
-___strnlen_user1:
-       ldx.b   r2, r5, r7              /* Fault address: only in reading */
-       addi    r3, -1, r3              /* No real fixup */
-       addi    r5, 1, r5
-       beq     r3, ZERO, tr0
-       bne     r7, ZERO, tr1
-! The line below used to be active.  This led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-!      addi    r5, 1, r5               /* Include '\0' */
-
-___strnlen_user_set_reply:
-       or      r5, ZERO, r6            /* If done, return counter */
-
-___strnlen_user_exit:
-       or      r6, ZERO, r2
-       ptabs   LINK, tr0
-       blink   tr0, ZERO
-
 /*
  * extern long __get_user_asm_?(void *val, long addr)
  *
@@ -1982,8 +1902,6 @@ asm_uaccess_start:
        .long   ___copy_user2, ___copy_user_exit
        .long   ___clear_user1, ___clear_user_exit
 #endif
-       .long   ___strncpy_from_user1, ___strncpy_from_user_exit
-       .long   ___strnlen_user1, ___strnlen_user_exit
        .long   ___get_user_asm_b1, ___get_user_asm_b_exit
        .long   ___get_user_asm_w1, ___get_user_asm_w_exit
        .long   ___get_user_asm_l1, ___get_user_asm_l_exit
index 9b7a459a4613d8573a7c84ac3bf970402431c31e..055d91b70305f3f2a45bf176507d484a4e49e619 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/stackprotector.h>
+#include <asm/fpu.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
index 4264583eabac52a292a4a3c79281b858020cd62d..602545b12a8678857c8752407be691a1bd511baa 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/switch_to.h>
 
 struct task_struct *last_task_used_math = NULL;
+struct pt_regs fake_swapper_regs = { 0, };
 
 void show_regs(struct pt_regs *regs)
 {
index 45afa5c51f6751585e2d8a8beba5cc11e837b5c8..26a0774f5272af46839c1faaf924ae9a4e29af13 100644 (file)
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
 EXPORT_SYMBOL(__get_user_asm_w);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
index cb4172c8af7d81c90e373a7c9a7ecb61a0bbd3a9..d6b7b6154f8764576abac916033326ba1c1e6bd2 100644 (file)
@@ -32,8 +32,6 @@
 #include <asm/syscalls.h>
 #include <asm/fpu.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
        unsigned long   text;
        unsigned long   GOT;
@@ -226,7 +224,6 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &r0))
@@ -256,7 +253,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -522,10 +518,11 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+             struct pt_regs *regs, unsigned int save_r0)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -534,10 +531,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (ret)
+               return;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -554,7 +551,6 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -565,30 +561,12 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
        if (!user_mode(regs))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                handle_syscall_restart(save_r0, regs, &ka.sa);
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &ka, &info, oldset,
-                                 regs, save_r0) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                       test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &ka, &info, regs, save_r0);
                return;
        }
 
@@ -610,10 +588,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
@@ -626,7 +601,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
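The sh32 conversion above follows the generic signal-delivery rework seen across architectures in this merge: handle_signal() now pulls the saved mask with sigmask_to_save(), signal_delivered() takes over the sigmask and tracehook bookkeeping, and the no-signal path collapses into restore_saved_sigmask(). A condensed, illustrative sketch of that common shape is below; setup_rt_frame() stands in for whichever frame builder a given architecture uses, and this is not a quote of any one file.

#include <linux/signal.h>
#include <linux/tracehook.h>
#include <linux/thread_info.h>

/* Sketch of the post-rework per-arch delivery path. */
static void handle_signal(unsigned long sig, siginfo_t *info,
			  struct k_sigaction *ka, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();	/* saved_sigmask or blocked */

	if (setup_rt_frame(sig, ka, info, oldset, regs))
		return;				/* frame setup failed */

	/* Blocks the signal, drops TS_RESTORE_SIGMASK, notifies ptrace. */
	signal_delivered(sig, info, ka, regs,
			 test_thread_flag(TIF_SINGLESTEP));
}

static void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr;

	if (!user_mode(regs))
		return;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &info, &ka, regs);
		return;
	}

	/* No signal to deliver: put any saved sigmask back in one call. */
	restore_saved_sigmask();
}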
index b589a354c069aec0ee66d231b7790c7c0d9b3717..6b5b3dfe886b2d3c15a4b08a4d4f21ab74e90da0 100644 (file)
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs * regs);
+               struct pt_regs * regs);
 
 static inline void
 handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
@@ -88,7 +86,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -99,28 +96,13 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, 0);
        if (signr > 0) {
                handle_syscall_restart(regs, &ka.sa);
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * If a signal was successfully delivered, the
-                        * saved sigmask is in its frame, and we can
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                       test_thread_flag(TIF_SINGLESTEP));
-                       return;
-               }
+               handle_signal(signr, &info, &ka, regs);
+               return;
        }
 
        /* Did we come from a system call? */
@@ -143,12 +125,7 @@ static void do_signal(struct pt_regs *regs)
        }
 
        /* No signal to deliver -- put the saved sigmask back */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
-       return;
+       restore_saved_sigmask();
 }
 
 /*
@@ -351,7 +328,6 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &ret))
@@ -384,7 +360,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
@@ -659,10 +634,11 @@ give_sigsegv:
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs * regs)
+               struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -671,10 +647,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
+       if (ret)
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -685,7 +662,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index b86e9ca79455d7cacb08a21b2e2199bc8879ee7c..2062aa88af41cc696d1f5892782245c4d73190fa 100644 (file)
@@ -123,7 +123,6 @@ void native_play_dead(void)
 int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
-       struct task_struct *p;
        int ret;
 
        ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@ int __cpu_disable(void)
        flush_cache_all();
        local_flush_tlb_all();
 
-       read_lock(&tasklist_lock);
-       for_each_process(p)
-               if (p->mm)
-                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-       read_unlock(&tasklist_lock);
+       clear_tasks_mm_cpumask(cpu);
 
        return 0;
 }
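The dying-CPU path now calls the generic clear_tasks_mm_cpumask() helper instead of open-coding the tasklist walk removed above. Conceptually it performs the same per-process cpumask clearing; the simplified sketch below mirrors the removed loop, while the real kernel/cpu.c implementation additionally pins each task's mm safely before touching it.

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

/* Simplified sketch of the generic helper's effect for a dying CPU. */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	rcu_read_unlock();
}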
index 83bd051754e1fecddc0be891676bb9f57d62ad8f..e74ff137762661844783fe76a30986fe01308e9e 100644 (file)
@@ -41,7 +41,6 @@ config SPARC32
        def_bool !64BIT
        select GENERIC_ATOMIC64
        select CLZ_TAB
-       select ARCH_USES_GETTIMEOFFSET
 
 config SPARC64
        def_bool 64BIT
index cbb93e5141de0ff27d4d0197363fff3c23397e9b..61ebe7411ceb0af141b4bc9ceef1c1bcf36521fc 100644 (file)
 #define ASI_M_UNA01         0x01   /* Same here... */
 #define ASI_M_MXCC          0x02   /* Access to TI VIKING MXCC registers */
 #define ASI_M_FLUSH_PROBE   0x03   /* Reference MMU Flush/Probe; rw, ss */
-#ifndef CONFIG_SPARC_LEON
 #define ASI_M_MMUREGS       0x04   /* MMU Registers; rw, ss */
-#else
-#define ASI_M_MMUREGS       0x19
-#endif /* CONFIG_SPARC_LEON */
 #define ASI_M_TLBDIAG       0x05   /* MMU TLB only Diagnostics */
 #define ASI_M_DIAGS         0x06   /* Reference MMU Diagnostics */
 #define ASI_M_IODIAG        0x07   /* MMU I/O TLB only Diagnostics */
index 02a172fb193aaded11b3080f2e5337ee951e1ad9..a0e28ef025587a8384f3825f04a5ec66cd99ca75 100644 (file)
 /* All traps low-level code here must end with this macro. */
 #define RESTORE_ALL b ret_trap_entry; clr %l6;
 
+/* Support for run-time patching of single instructions.
+ * This is used to handle the difference in the MMUREGS
+ * ASI between LEON and SUN.
+ *
+ * Sample:
+ * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+ * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
+ * PI == Patch Instruction
+ *
+ * For LEON we will use the first variant,
+ * and for all others we will use the SUN variant.
+ * The order is important.
+ */
+#define LEON_PI(...)                           \
+662:   __VA_ARGS__
+
+#define SUN_PI_(...)                           \
+       .section .leon_1insn_patch, "ax";       \
+       .word 662b;                             \
+       __VA_ARGS__;                            \
+       .previous
+
 #endif /* !(_SPARC_ASMMACRO_H) */
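Each LEON_PI()/SUN_PI_() pair leaves the LEON instruction in the text section under the 662 label and records that address together with the SUN replacement in the .leon_1insn_patch section. Early boot code on a non-LEON machine can then walk the table and overwrite each recorded instruction before traps are enabled. A minimal sketch of such a pass follows, assuming the two-word {address, instruction} entry layout implied by the macro; the struct and function names here are illustrative, not necessarily the ones used in the tree.

#include <linux/init.h>

/* Entry layout implied by SUN_PI_(): ".word 662b" then the SUN insn. */
struct leon_1insn_patch_entry {
	unsigned int addr;	/* address of the instruction to replace */
	unsigned int insn;	/* SUN (ASI_M_MMUREGS) replacement       */
};

extern char __leon_1insn_patch[], __leon_1insn_patch_end[];

/* Illustrative boot-time pass; the in-tree name may differ. */
static void __init apply_1insn_patches(void)
{
	struct leon_1insn_patch_entry *p   = (void *)__leon_1insn_patch;
	struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;

	for (; p < end; p++) {
		*(unsigned int *)(unsigned long)p->addr = p->insn;
		/* the real code must also flush the I-cache for p->addr */
	}
}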
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h
deleted file mode 100644 (file)
index 870db59..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _SPARC64_CMT_H
-#define _SPARC64_CMT_H
-
-/* cmt.h: Chip Multi-Threading register definitions
- *
- * Copyright (C) 2004 David S. Miller (davem@redhat.com)
- */
-
-/* ASI_CORE_ID - private */
-#define LP_ID          0x0000000000000010UL
-#define  LP_ID_MAX     0x00000000003f0000UL
-#define  LP_ID_ID      0x000000000000003fUL
-
-/* ASI_INTR_ID - private */
-#define LP_INTR_ID     0x0000000000000000UL
-#define  LP_INTR_ID_ID 0x00000000000003ffUL
-
-/* ASI_CESR_ID - private */
-#define CESR_ID                0x0000000000000040UL
-#define  CESR_ID_ID    0x00000000000000ffUL
-
-/* ASI_CORE_AVAILABLE - shared */
-#define LP_AVAIL       0x0000000000000000UL
-#define  LP_AVAIL_1    0x0000000000000002UL
-#define  LP_AVAIL_0    0x0000000000000001UL
-
-/* ASI_CORE_ENABLE_STATUS - shared */
-#define LP_ENAB_STAT   0x0000000000000010UL
-#define  LP_ENAB_STAT_1        0x0000000000000002UL
-#define  LP_ENAB_STAT_0        0x0000000000000001UL
-
-/* ASI_CORE_ENABLE - shared */
-#define LP_ENAB                0x0000000000000020UL
-#define  LP_ENAB_1     0x0000000000000002UL
-#define  LP_ENAB_0     0x0000000000000001UL
-
-/* ASI_CORE_RUNNING - shared */
-#define LP_RUNNING_RW  0x0000000000000050UL
-#define LP_RUNNING_W1S 0x0000000000000060UL
-#define LP_RUNNING_W1C 0x0000000000000068UL
-#define  LP_RUNNING_1  0x0000000000000002UL
-#define  LP_RUNNING_0  0x0000000000000001UL
-
-/* ASI_CORE_RUNNING_STAT - shared */
-#define LP_RUN_STAT    0x0000000000000058UL
-#define  LP_RUN_STAT_1 0x0000000000000002UL
-#define  LP_RUN_STAT_0 0x0000000000000001UL
-
-/* ASI_XIR_STEERING - shared */
-#define LP_XIR_STEER   0x0000000000000030UL
-#define  LP_XIR_STEER_1        0x0000000000000002UL
-#define  LP_XIR_STEER_0        0x0000000000000001UL
-
-/* ASI_CMT_ERROR_STEERING - shared */
-#define CMT_ER_STEER   0x0000000000000040UL
-#define  CMT_ER_STEER_1        0x0000000000000002UL
-#define  CMT_ER_STEER_0        0x0000000000000001UL
-
-#endif /* _SPARC64_CMT_H */
index 48a7c65731d2e0cf08ab2e974adcf922f2dddc58..8493fd3c7ba5a5ea39364948c26ecdd4feef41b8 100644 (file)
@@ -12,13 +12,18 @@ extern int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *leon_dma_ops;
+extern struct dma_map_ops pci32_dma_ops;
+
 extern struct bus_type pci_bus_type;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
-       if (dev->bus == &pci_bus_type)
+       if (sparc_cpu_model == sparc_leon)
+               return leon_dma_ops;
+       else if (dev->bus == &pci_bus_type)
                return &pci32_dma_ops;
 #endif
        return dma_ops;
index 07659124c1404da4ac11200c5269eb7131c0929f..3375c6293893654bfb0ab46a41ee084ccd3d8cc6 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef LEON_H_INCLUDE
 #define LEON_H_INCLUDE
 
-#ifdef CONFIG_SPARC_LEON
-
 /* mmu register access, ASI_LEON_MMUREGS */
 #define LEON_CNR_CTRL          0x000
 #define LEON_CNR_CTXP          0x100
 
 #ifndef __ASSEMBLY__
 
-/* do a virtual address read without cache */
-static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
-{
-       unsigned long retval;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
-       return retval;
-}
-
 /* do a physical address bypass write, i.e. for 0x80000000 */
 static inline void leon_store_reg(unsigned long paddr, unsigned long value)
 {
@@ -87,47 +76,16 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
        return retval;
 }
 
-static inline void leon_srmmu_disabletlb(void)
-{
-       unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-                            "i"(ASI_LEON_MMUREGS));
-       retval |= LEON_CNR_CTRL_TLBDIS;
-       __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-                            "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
-static inline void leon_srmmu_enabletlb(void)
-{
-       unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-                            "i"(ASI_LEON_MMUREGS));
-       retval = retval & ~LEON_CNR_CTRL_TLBDIS;
-       __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-                            "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
 /* macro access for leon_load_reg() and leon_store_reg() */
 #define LEON3_BYPASS_LOAD_PA(x)            (leon_load_reg((unsigned long)(x)))
 #define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
-#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
-#define LEON3_BYPASS_ORIN_PA(x, v)  LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
 #define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
-#define LEON_REGLOAD_PA(x)          leon_load_reg((unsigned long)(x)+LEON_PREGS)
-#define LEON_REGSTORE_PA(x, v)      leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
-#define LEON_REGSTORE_OR_PA(x, v)   LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
-#define LEON_REGSTORE_AND_PA(x, v)  LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
-
-/* macro access for leon_readnobuffer_reg() */
-#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
 
 extern void leon_init(void);
 extern void leon_switch_mm(void);
 extern void leon_init_IRQ(void);
 
-extern unsigned long last_valid_pfn;
-
 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
        unsigned int retval;
@@ -230,9 +188,6 @@ static inline int sparc_leon3_cpuid(void)
 #error cannot determine LEON_PAGE_SIZE_LEON
 #endif
 
-#define PAGE_MIN_SHIFT   (12)
-#define PAGE_MIN_SIZE    (1UL << PAGE_MIN_SHIFT)
-
 #define LEON3_XCCR_SETS_MASK  0x07000000UL
 #define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
 
@@ -242,7 +197,7 @@ static inline int sparc_leon3_cpuid(void)
 #ifndef __ASSEMBLY__
 struct vm_area_struct;
 
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
 extern void leon_flush_icache_all(void);
 extern void leon_flush_dcache_all(void);
 extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@ struct leon3_cacheregs {
        unsigned long dccr;     /* 0x0c - Data Cache Configuration Register */
 };
 
-/* struct that hold LEON2 cache configuration register
- * & configuration register
- */
-struct leon2_cacheregs {
-       unsigned long ccr, cfg;
-};
-
-#ifdef __KERNEL__
-
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 
 struct device_node;
@@ -292,24 +239,15 @@ extern void leon_smp_done(void);
 extern void leon_boot_cpus(void);
 extern int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
-extern void cpu_idle(void);
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-extern int __leon_processor_id(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
 extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
-extern unsigned int real_irq_entry[];
 extern unsigned int smpleon_ipi[];
-extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
-extern unsigned int linux_trap_ipi15_sun4m[];
+extern unsigned int linux_trap_ipi15_leon[];
 extern int leon_ipi_irq;
 
 #endif /* CONFIG_SMP */
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
 
 /* macros used in leon_mm.c */
@@ -317,18 +255,4 @@ extern int leon_ipi_irq;
 #define _pfn_valid(pfn)         ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
 #define _SRMMU_PTE_PMASK_LEON 0xffffffff
 
-#else /* defined(CONFIG_SPARC_LEON) */
-
-/* nop definitions for !LEON case */
-#define leon_init() do {} while (0)
-#define leon_switch_mm() do {} while (0)
-#define leon_init_IRQ() do {} while (0)
-#define init_leon() do {} while (0)
-#define leon_smp_done() do {} while (0)
-#define leon_boot_cpus() do {} while (0)
-#define leon_boot_one_cpu(i, t) 1
-#define leon_init_smp() do {} while (0)
-
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
index e50f326e71bd1244c091da75519f2638b09ab8f9..f3034eddf4682569c257a2ce19941b013a98c218 100644 (file)
@@ -87,8 +87,6 @@ struct amba_prom_registers {
 #define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
 #define LEON3_GPTIMER_CTRL_ISPENDING(r)  (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
 
-#ifdef CONFIG_SPARC_LEON
-
 #ifndef __ASSEMBLY__
 
 struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@ extern unsigned int sparc_leon_eirq;
 
 #define amba_device(x) (((x) >> 12) & 0xfff)
 
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h
deleted file mode 100644 (file)
index f842303..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * mpmbox.h:  Interface and defines for the OpenProm mailbox
- *               facilities for MP machines under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_MPMBOX_H
-#define _SPARC_MPMBOX_H
-
-/* The prom allocates, for each CPU on the machine an unsigned
- * byte in physical ram.  You probe the device tree prom nodes
- * for these values.  The purpose of this byte is to be able to
- * pass messages from one cpu to another.
- */
-
-/* These are the main message types we have to look for in our
- * Cpu mailboxes, based upon these values we decide what course
- * of action to take.
- */
-
-/* The CPU is executing code in the kernel. */
-#define MAILBOX_ISRUNNING     0xf0
-
-/* Another CPU called romvec->pv_exit(), you should call
- * prom_stopcpu() when you see this in your mailbox.
- */
-#define MAILBOX_EXIT          0xfb
-
-/* Another CPU called romvec->pv_enter(), you should call
- * prom_cpuidle() when this is seen.
- */
-#define MAILBOX_GOSPIN        0xfc
-
-/* Another CPU has hit a breakpoint either into kadb or the prom
- * itself.  Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
- * at this point.
- */
-#define MAILBOX_BPT_SPIN      0xfd
-
-/* Oh geese, some other nitwit got a damn watchdog reset.  The party's
- * over so go call prom_stopcpu().
- */
-#define MAILBOX_WDOG_STOP     0xfe
-
-#ifndef __ASSEMBLY__
-
-/* Handy macro's to determine a cpu's state. */
-
-/* Is the cpu still in Power On Self Test? */
-#define MBOX_POST_P(letter)  ((letter) >= 0x00 && (letter) <= 0x7f)
-
-/* Is the cpu at the 'ok' prompt of the PROM? */
-#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
-
-/* Is the cpu spinning in the PROM? */
-#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
-
-/* Sanity check... This is junk mail, throw it out. */
-#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
-
-/* Is the cpu actively running an application/kernel-code? */
-#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_MPMBOX_H) */
index cb828703a63ae853f702d781eac4ce69395ad4a9..79da17866fa8997ab3032f444b7598076dacaf36 100644 (file)
         restore %g0, %g0, %g0;
 
 #ifndef __ASSEMBLY__
+extern unsigned long last_valid_pfn;
 
 /* This makes sense. Honest it does - Anton */
 /* XXX Yes but it's ugly as sin.  FIXME. -KMW */
@@ -148,67 +149,13 @@ extern void *srmmu_nocache_pool;
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
-        unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-                            "=r" (retval) :
-                            "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
-       __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-                            "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
-       paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
-       __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-                            "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
-                            "i" (ASI_M_MMUREGS) :
-                            "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
-       __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-                            "r" (context), "r" (SRMMU_CTX_REG),
-                            "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
-       register int retval;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_CTX_REG),
-                            "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
-       unsigned int retval;
-
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
-       unsigned int retval;
-
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
-       return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);
 
 /* This is guaranteed on all SRMMU's. */
 static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@ static inline void srmmu_flush_whole_tlb(void)
 
 }
 
-/* These flush types are not available on all chips... */
-#ifndef CONFIG_SPARC_LEON
-static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
-{
-       unsigned long retval;
-
-       vaddr &= PAGE_MASK;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-
-       return retval;
-}
-#else
-#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
-#endif
-
 static inline int
 srmmu_get_pte (unsigned long addr)
 {
index 3070f25ae90a3e235eaaf2373949226ea83acfb6..156220ed99eb7dfbfe8696ea9f04da84fc00ff5f 100644 (file)
@@ -9,8 +9,6 @@
 
 #if defined(__sparc__) && defined(__arch64__)
 /* sparc 64 bit */
-typedef unsigned int           __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 
 typedef unsigned short                __kernel_old_uid_t;
 typedef unsigned short         __kernel_old_gid_t;
@@ -38,9 +36,6 @@ typedef unsigned short         __kernel_gid_t;
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef short                  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef long                   __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
index b8c0e5f0a66bbf1e4014bec94292320835914382..cee7ed9c927d9ac433db875646add31e212dc32b 100644 (file)
 #define PSR_VERS    0x0f000000         /* cpu-version field          */
 #define PSR_IMPL    0xf0000000         /* cpu-implementation field   */
 
+#define PSR_VERS_SHIFT         24
+#define PSR_IMPL_SHIFT         28
+#define PSR_VERS_SHIFTED_MASK  0xf
+#define PSR_IMPL_SHIFTED_MASK  0xf
+
+#define PSR_IMPL_TI            0x4
+#define PSR_IMPL_LEON          0xf
+
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
index 0b0553bbd8a0dd26feb97be864a29997b754643e..f300d1a9b2b6f8f3650ef3b6d712f493e4a6b8b1 100644 (file)
@@ -7,4 +7,7 @@
 /* sparc entry point */
 extern char _start[];
 
+extern char __leon_1insn_patch[];
+extern char __leon_1insn_patch_end[];
+
 #endif
index 5af664932452190e66543500d21c9dcd6ba48763..e6cd224506a9355168ebea0c5ed0be8c3b625120 100644 (file)
@@ -131,8 +131,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_DO_NOTIFY_RESUME_MASK     (_TIF_NOTIFY_RESUME | \
-                                        _TIF_SIGPENDING | \
-                                        _TIF_RESTORE_SIGMASK)
+                                        _TIF_SIGPENDING)
 
 #endif /* __KERNEL__ */
 
index 7f0981b094517aace2b461e2f6d7db9aa6d0fbfa..cfa8c38fb9c8511d51cfbe17e3d8792584d8b241 100644 (file)
@@ -238,7 +238,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index 72308f9b0096e45b6da24fef022a261989d5eacf..6cf591b7e1c67e2b82c4e28bf177197ce0f602a7 100644 (file)
@@ -51,8 +51,8 @@ obj-y                   += of_device_common.o
 obj-y                   += of_device_$(BITS).o
 obj-$(CONFIG_SPARC64)   += prom_irqtrans.o
 
-obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
-obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
+obj-$(CONFIG_SPARC32)   += leon_kernel.o
+obj-$(CONFIG_SPARC32)   += leon_pmc.o
 
 obj-$(CONFIG_SPARC64)   += reboot.o
 obj-$(CONFIG_SPARC64)   += sysfs.o
index 2d1819641769fca63bad5c086bccd7006a8444ef..a6c94a2bf9d4b1150e41b63d3e71a81138fc1f63 100644 (file)
@@ -121,7 +121,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
                FPU(-1, NULL)
        }
 },{
-       4,
+       PSR_IMPL_TI,
        .cpu_info = {
                CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
                /* SparcClassic  --  borned STP1010TAB-50*/
@@ -191,7 +191,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
                FPU(-1, NULL)
        }
 },{
-       0xF,            /* Aeroflex Gaisler */
+       PSR_IMPL_LEON,          /* Aeroflex Gaisler */
        .cpu_info = {
                CPU(3, "LEON"),
                CPU(-1, NULL)
@@ -440,16 +440,16 @@ static int __init cpu_type_probe(void)
        int psr_impl, psr_vers, fpu_vers;
        int psr;
 
-       psr_impl = ((get_psr() >> 28) & 0xf);
-       psr_vers = ((get_psr() >> 24) & 0xf);
+       psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
+       psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
 
        psr = get_psr();
        put_psr(psr | PSR_EF);
-#ifdef CONFIG_SPARC_LEON
-       fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
-#else
-       fpu_vers = ((get_fsr() >> 17) & 0x7);
-#endif
+
+       if (psr_impl == PSR_IMPL_LEON)
+               fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
+       else
+               fpu_vers = ((get_fsr() >> 17) & 0x7);
 
        put_psr(psr);
 
index 2dbe1806e5300ab8bf49a1c22dda5850c263f75f..dcaa1cf0de40c790b27b32fbbdfe997cfdee072f 100644 (file)
@@ -393,7 +393,6 @@ linux_trap_ipi15_sun4d:
        /* FIXME */
 1:     b,a     1b
 
-#ifdef CONFIG_SPARC_LEON
        .globl  smpleon_ipi
        .extern leon_ipi_interrupt
        /* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@ linux_trap_ipi15_leon:
        b       ret_trap_lockless_ipi
         clr    %l6
 
-#endif /* CONFIG_SPARC_LEON */
-
 #endif /* CONFIG_SMP */
 
        /* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@ srmmu_fault:
        mov     0x400, %l5
        mov     0x300, %l4
 
-       lda     [%l5] ASI_M_MMUREGS, %l6        ! read sfar first
-       lda     [%l4] ASI_M_MMUREGS, %l5        ! read sfsr last
+LEON_PI(lda    [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
+SUN_PI_(lda    [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
+
+LEON_PI(lda    [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
+SUN_PI_(lda    [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
 
        andn    %l6, 0xfff, %l6
        srl     %l5, 6, %l5                     ! and encode all info into l7
index 84b5f0d2afde51b344485c96ee2fbef57e2ae684..e3e80d65e39af1d9167a7ca3156ee2f797112a6c 100644 (file)
@@ -234,7 +234,8 @@ tsetup_srmmu_stackchk:
 
        cmp     %glob_tmp, %sp
        bleu,a  1f
-        lda    [%g0] ASI_M_MMUREGS, %glob_tmp          ! read MMU control
+LEON_PI( lda   [%g0] ASI_LEON_MMUREGS, %glob_tmp)      ! read MMU control
+SUN_PI_( lda   [%g0] ASI_M_MMUREGS, %glob_tmp)         ! read MMU control
 
 trap_setup_user_stack_is_bolixed:
        /* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@ trap_setup_user_stack_is_bolixed:
 1:
        /* Clear the fault status and turn on the no_fault bit. */
        or      %glob_tmp, 0x2, %glob_tmp               ! or in no_fault bit
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS          ! set it
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)              ! set it
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)         ! set it
 
        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)
 
        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)
+
        mov     AC_M_SFAR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)
+
        mov     AC_M_SFSR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp    ! save away status of winstore
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)   ! save away status of winstore
+
        andcc   %glob_tmp, 0x2, %g0                     ! did we fault?
        bne     trap_setup_user_stack_is_bolixed        ! failure
         nop
index a0f5c20e4b9cd5f286432d8bd4dc678cbc62e0bd..afeb1d7703032a76c5b29b38ec98cd068f74cb55 100644 (file)
  * the cpu-type
  */
        .align 4
-cputyp:
-        .word   1
-
-       .align 4
        .globl cputypval
 cputypval:
        .asciz "sun4m"
@@ -46,8 +42,8 @@ cputypvar:
 
        .align 4
 
-sun4c_notsup:
-       .asciz  "Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
+notsup:
+       .asciz  "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
        .align 4
 
 sun4e_notsup:
@@ -123,7 +119,7 @@ current_pc:
                tst     %o0
                be      no_sun4u_here
                 mov    %g4, %o7                /* Previous %o7. */
-       
+
                mov     %o0, %l0                ! stash away romvec
                mov     %o0, %g7                ! put it here too
                mov     %o1, %l1                ! stash away debug_vec too
@@ -132,7 +128,7 @@ current_pc:
                set     current_pc, %g5
                cmp     %g3, %g5
                be      already_mapped
-                nop 
+                nop
 
                /* %l6 will hold the offset we have to subtract
                 * from absolute symbols in order to access areas
@@ -192,9 +188,9 @@ copy_prom_done:
                bne     not_a_sun4
                 nop
 
-halt_sun4_or_sun4c:
+halt_notsup:
                ld      [%g7 + 0x68], %o1
-               set     sun4c_notsup, %o0
+               set     notsup, %o0
                sub     %o0, %l6, %o0
                call    %o1
                 nop
@@ -202,18 +198,31 @@ halt_sun4_or_sun4c:
                 nop
 
 not_a_sun4:
+               /* It looks like this is a machine we support.
+                * Now find out what MMU we are dealing with:
+                * LEON   - identified by the psr.impl field
+                * Viking - identified by the psr.impl field
+                * In all other cases assume a sun4m srmmu.
+                * We check that the MMU is enabled in all cases.
+                */
+
+               /* Check if this is a LEON CPU */
+               rd      %psr, %g3
+               srl     %g3, PSR_IMPL_SHIFT, %g3
+               and     %g3, PSR_IMPL_SHIFTED_MASK, %g3
+               cmp     %g3, PSR_IMPL_LEON
+               be      leon_remap              /* It is a LEON - jump */
+                nop
+
+               /* Sanity-check, is MMU enabled */
                lda     [%g0] ASI_M_MMUREGS, %g1
                andcc   %g1, 1, %g0
-               be      halt_sun4_or_sun4c
+               be      halt_notsup
                 nop
 
-srmmu_remap:
-               /* First, check for a viking (TI) module. */
-               set     0x40000000, %g2
-               rd      %psr, %g3
-               and     %g2, %g3, %g3
-               subcc   %g3, 0x0, %g0
-               bz      srmmu_nviking
+               /* Check for a viking (TI) module. */
+               cmp     %g3, PSR_IMPL_TI
+               bne     srmmu_not_viking
                 nop
 
                /* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@ srmmu_remap:
                lda     [%g0] ASI_M_MMUREGS, %g3        ! peek in the control reg
                and     %g2, %g3, %g3
                subcc   %g3, 0x0, %g0
-               bnz     srmmu_nviking                   ! is in mbus mode
+               bnz     srmmu_not_viking                        ! is in mbus mode
                 nop
-               
+
                rd      %psr, %g3                       ! DO NOT TOUCH %g3
                andn    %g3, PSR_ET, %g2
                wr      %g2, 0x0, %psr
                WRITE_PAUSE
-               
+
                /* Get context table pointer, then convert to
                 * a physical address, which is 36 bits.
                 */
@@ -258,12 +267,12 @@ srmmu_remap:
                lda     [%g4] ASI_M_BYPASS, %o1         ! This is a level 1 ptr
                srl     %o1, 0x4, %o1                   ! Clear low 4 bits
                sll     %o1, 0x8, %o1                   ! Make physical
-               
+
                /* Ok, pull in the PTD. */
                lda     [%o1] ASI_M_BYPASS, %o2         ! This is the 0x0 16MB pgd
 
                /* Calculate to KERNBASE entry. */
-               add     %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3           
+               add     %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
 
                /* Poke the entry into the calculated address. */
                sta     %o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@ srmmu_remap:
                b       go_to_highmem
                 nop
 
+srmmu_not_viking:
                /* This works on viking's in Mbus mode and all
                 * other MBUS modules.  It is virtually the same as
                 * the above madness sans turning traps off and flipping
                 * the AC bit.
                 */
-srmmu_nviking:
                set     AC_M_CTPR, %g1
                lda     [%g1] ASI_M_MMUREGS, %g1        ! get ctx table ptr
                sll     %g1, 0x4, %g1                   ! make physical addr
@@ -313,6 +322,29 @@ srmmu_nviking:
                 nop                                    ! wheee....
 
 
+leon_remap:
+               /* Sanity-check, is MMU enabled */
+               lda     [%g0] ASI_LEON_MMUREGS, %g1
+               andcc   %g1, 1, %g0
+               be      halt_notsup
+                nop
+
+               /* Same code as in the srmmu_not_viking case,
+                * with the LEON ASI for mmuregs
+                */
+               set     AC_M_CTPR, %g1
+               lda     [%g1] ASI_LEON_MMUREGS, %g1     ! get ctx table ptr
+               sll     %g1, 0x4, %g1                   ! make physical addr
+               lda     [%g1] ASI_M_BYPASS, %g1         ! ptr to level 1 pg_table
+               srl     %g1, 0x4, %g1
+               sll     %g1, 0x8, %g1                   ! make phys addr for l1 tbl
+
+               lda     [%g1] ASI_M_BYPASS, %g2         ! get level1 entry for 0x0
+               add     %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+               sta     %g2, [%g3] ASI_M_BYPASS         ! place at KERNBASE entry
+               b       go_to_highmem
+                nop                                    ! wheee....
+
 /* Now do a non-relative jump so that PC is in high-memory */
 go_to_highmem:
                set     execute_in_high_mem, %g1
@@ -336,8 +368,9 @@ execute_in_high_mem:
                sethi   %hi(linux_dbvec), %g1
                st      %o1, [%g1 + %lo(linux_dbvec)]
 
-/* Get the machine type via the mysterious romvec node operations. */
-
+               /* Get the machine type via the romvec
+                * getprops node operation
+                */
                add     %g7, 0x1c, %l1
                ld      [%l1], %l0
                ld      [%l0], %l0
@@ -356,9 +389,42 @@ execute_in_high_mem:
                                                ! to a buf where above string
                                                ! will get stored by the prom.
 
-#ifdef CONFIG_SPARC_LEON
-               /* no cpu-type check is needed, it is a SPARC-LEON */
 
+               /* Check value of "compatible" property.
+                * "value" => "model"
+                * leon => sparc_leon
+                * sun4m => sun4m
+                * sun4s => sun4m
+                * sun4d => sun4d
+                * sun4e => "no_sun4e_here"
+                * '*'   => "no_sun4u_here"
+                * Check single letters only
+                */
+
+               set     cputypval, %o2
+               /* If cputypval[0] == 'l' (lower case letter L) this is leon */
+               ldub    [%o2], %l1
+               cmp     %l1, 'l'
+               be      leon_init
+                nop
+
+               /* Check cputypval[4] to find the sun model */
+               ldub    [%o2 + 0x4], %l1
+
+               cmp     %l1, 'm'
+               be      sun4m_init
+                cmp    %l1, 's'
+               be      sun4m_init
+                cmp    %l1, 'd'
+               be      sun4d_init
+                cmp    %l1, 'e'
+               be      no_sun4e_here           ! Could be a sun4e.
+                nop
+               b       no_sun4u_here           ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
+                nop
+
+leon_init:
+               /* LEON CPU - set boot_cpu_id */
                sethi   %hi(boot_cpu_id), %g2   ! boot-cpu index
 
 #ifdef CONFIG_SMP
@@ -376,26 +442,6 @@ execute_in_high_mem:
 
                ba continue_boot
                 nop
-#endif
-
-/* Check to cputype. We may be booted on a sun4u (64 bit box),
- * and sun4d needs special treatment.
- */
-
-               set     cputypval, %o2
-               ldub    [%o2 + 0x4], %l1
-
-               cmp     %l1, 'm'
-               be      sun4m_init
-                cmp    %l1, 's'
-               be      sun4m_init
-                cmp    %l1, 'd'
-               be      sun4d_init
-                cmp    %l1, 'e'
-               be      no_sun4e_here           ! Could be a sun4e.
-                nop
-               b       no_sun4u_here           ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
-                nop
 
 /* CPUID in bootbus can be found at PA 0xff0140000 */
 #define SUN4D_BOOTBUS_CPUID     0xf0140000
@@ -431,9 +477,9 @@ sun4m_init:
 /* This sucks, apparently this makes Vikings call prom panic, will fix later */
 2:
                rd      %psr, %o1
-               srl     %o1, 28, %o1            ! Get a type of the CPU
+               srl     %o1, PSR_IMPL_SHIFT, %o1        ! Get a type of the CPU
 
-               subcc   %o1, 4, %g0             ! TI: Viking or MicroSPARC
+               subcc   %o1, PSR_IMPL_TI, %g0           ! TI: Viking or MicroSPARC
                be      continue_boot
                 nop
 
@@ -459,10 +505,6 @@ continue_boot:
 /* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
  * show-time!
  */
-
-               sethi   %hi(cputyp), %o0
-               st      %g4, [%o0 + %lo(cputyp)]
-
                /* Turn on Supervisor, EnableFloating, and all the PIL bits.
                 * Also puts us in register window zero with traps off.
                 */
@@ -480,7 +522,7 @@ continue_boot:
                set     __bss_start , %o0       ! First address of BSS
                set     _end , %o1              ! Last address of BSS
                add     %o0, 0x1, %o0
-1:     
+1:
                stb     %g0, [%o0]
                subcc   %o0, %o1, %g0
                bl      1b
@@ -546,7 +588,7 @@ continue_boot:
                set     dest, %g2; \
                ld      [%g5], %g4; \
                st      %g4, [%g2];
-       
+
                /* Patch for window spills... */
                PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
                PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@ continue_boot:
                st      %g4, [%g5 + 0x18]
                st      %g4, [%g5 + 0x1c]
 
-2:             
+2:
                sethi   %hi(nwindows), %g4
                st      %g3, [%g4 + %lo(nwindows)]      ! store final value
                sub     %g3, 0x1, %g3
@@ -617,18 +659,12 @@ continue_boot:
                wr      %g3, PSR_ET, %psr
                WRITE_PAUSE
 
-               /* First we call prom_init() to set up PROMLIB, then
-                * off to start_kernel().
-                */
-
+               /* Call sparc32_start_kernel(struct linux_romvec *rp) */
                sethi   %hi(prom_vector_p), %g5
                ld      [%g5 + %lo(prom_vector_p)], %o0
-               call    prom_init
+               call    sparc32_start_kernel
                 nop
 
-               call    start_kernel
-                nop
-       
                /* We should not get here. */
                call    halt_me
                 nop
@@ -659,7 +695,7 @@ sun4u_5:
                .asciz "write"
                .align  4
 sun4u_6:
-               .asciz  "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
+               .asciz  "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
 sun4u_6e:
                .align  4
 sun4u_7:
index a2846f5e32d8042246e1764132c4ae9ec32bb857..0f094db918c7f8b711aeaab6ba01ac46847332cd 100644 (file)
@@ -55,17 +55,13 @@ const struct sparc32_dma_ops *sparc32_dma_ops;
 /* This function must make sure that caches and memory are coherent after DMA
  * On LEON systems without cache snooping it flushes the entire D-CACHE.
  */
-#ifndef CONFIG_SPARC_LEON
 static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
+       if (sparc_cpu_model == sparc_leon) {
+               if (!sparc_leon3_snooping_enabled())
+                       leon_flush_dcache_all();
+       }
 }
-#else
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
-       if (!sparc_leon3_snooping_enabled())
-               leon_flush_dcache_all();
-}
-#endif
 
 static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
 static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@ arch_initcall(sparc_register_ioport);
 #endif /* CONFIG_SBUS */
 
 
-/* LEON reuses PCI DMA ops */
-#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
-
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
@@ -657,14 +650,11 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+/* leon re-uses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
 
-#ifdef CONFIG_SPARC_LEON
-struct dma_map_ops *dma_ops = &pci32_dma_ops;
-#elif defined(CONFIG_SBUS)
 struct dma_map_ops *dma_ops = &sbus_dma_ops;
-#endif
-
 EXPORT_SYMBOL(dma_ops);
 
 
index ae04914f7774be143a95964d139f1f2e1003da7d..c145f6fd123b88814c9d650e781dd391ea0e73f7 100644 (file)
@@ -241,9 +241,6 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
        unsigned int cpu_irq;
        int err;
 
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-       struct tt_entry *trap_table;
-#endif
 
        err = request_irq(irq, irq_handler, 0, "floppy", NULL);
        if (err)
@@ -264,13 +261,18 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
 
        INSTANTIATE(sparc_ttable)
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-       trap_table = &trapbase_cpu1;
-       INSTANTIATE(trap_table)
-       trap_table = &trapbase_cpu2;
-       INSTANTIATE(trap_table)
-       trap_table = &trapbase_cpu3;
-       INSTANTIATE(trap_table)
+
+#if defined CONFIG_SMP
+       if (sparc_cpu_model != sparc_leon) {
+               struct tt_entry *trap_table;
+
+               trap_table = &trapbase_cpu1;
+               INSTANTIATE(trap_table)
+               trap_table = &trapbase_cpu2;
+               INSTANTIATE(trap_table)
+               trap_table = &trapbase_cpu3;
+               INSTANTIATE(trap_table)
+       }
 #endif
 #undef INSTANTIATE
        /*
index a86372d345870202a8bb27e9d72c18b8f7574cb7..291bb5de9ce0963184518a9a00872370821075d1 100644 (file)
@@ -26,6 +26,9 @@ static inline unsigned long kimage_addr_to_ra(const char *p)
 #endif
 
 #ifdef CONFIG_SPARC32
+/* setup_32.c */
+void sparc32_start_kernel(struct linux_romvec *rp);
+
 /* cpu.c */
 extern void cpu_probe(void);
 
index 77c1b916e4dd35c8b04422a1e7102583b1b8a45c..e34e2c40c0609eac5b41b7220f01304cd2373e20 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/smp.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
 #include "prom.h"
 #include "irq.h"
 
index 519ca923f59f491468d6252423effac83cc9f9d2..4e174321097d7a4af24eb0f5cd3bb4b1d5b37e53 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/pm.h>
 
 #include <asm/leon_amba.h>
+#include <asm/cpu_type.h>
 #include <asm/leon.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@ void pmc_leon_idle(void)
 /* Install LEON Power Down function */
 static int __init leon_pmc_install(void)
 {
-       /* Assign power management IDLE handler */
-       if (pmc_leon_need_fixup())
-               pm_idle = pmc_leon_idle_fixup;
-       else
-               pm_idle = pmc_leon_idle;
+       if (sparc_cpu_model == sparc_leon) {
+               /* Assign power management IDLE handler */
+               if (pmc_leon_need_fixup())
+                       pm_idle = pmc_leon_idle_fixup;
+               else
+                       pm_idle = pmc_leon_idle;
 
-       printk(KERN_INFO "leon: power management initialized\n");
+               printk(KERN_INFO "leon: power management initialized\n");
+       }
 
        return 0;
 }
index a469090faf9f12f83251b2432614b0724d9e67e0..0f3fb6d9c8efc3e8cd0f12e48d3ff26d87a6699f 100644 (file)
 
 #include "kernel.h"
 
-#ifdef CONFIG_SPARC_LEON
-
 #include "irq.h"
 
 extern ctxd_t *srmmu_ctx_table_phys;
 static int smp_processors_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern cpumask_t smp_commenced_mask;
-void __init leon_configure_cache_smp(void);
+void __cpuinit leon_configure_cache_smp(void);
 static void leon_ipi_init(void);
 
 /* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@ void __cpuinit leon_callin(void)
 
 extern struct linux_prom_registers smp_penguin_ctable;
 
-void __init leon_configure_cache_smp(void)
+void __cpuinit leon_configure_cache_smp(void)
 {
        unsigned long cfg = sparc_leon3_get_dcachecfg();
        int me = smp_processor_id();
@@ -507,5 +505,3 @@ void __init leon_init_smp(void)
 
        sparc32_ipi_ops = &leon_ipi_ops;
 }
-
-#endif /* CONFIG_SPARC_LEON */
index fe6787cc62fc9188cbf792cd53ba65f998eeccb3..cb36e82dcd5dd789221852c366934c93f26d7ba3 100644 (file)
@@ -65,50 +65,25 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
 struct task_struct *last_task_used_math = NULL;
 struct thread_info *current_set[NR_CPUS];
 
-#ifndef CONFIG_SMP
-
 /*
  * the idle loop on a Sparc... ;)
  */
 void cpu_idle(void)
 {
-       /* endless idle loop with no priority at all */
-       for (;;) {
-               if (pm_idle) {
-                       while (!need_resched())
-                               (*pm_idle)();
-               } else {
-                       while (!need_resched())
-                               cpu_relax();
-               }
-               schedule_preempt_disabled();
-       }
-}
-
-#else
+       set_thread_flag(TIF_POLLING_NRFLAG);
 
-/* This is being executed in task 0 'user space'. */
-void cpu_idle(void)
-{
-        set_thread_flag(TIF_POLLING_NRFLAG);
        /* endless idle loop with no priority at all */
-       while(1) {
-#ifdef CONFIG_SPARC_LEON
-               if (pm_idle) {
-                       while (!need_resched())
+       for (;;) {
+               while (!need_resched()) {
+                       if (pm_idle)
                                (*pm_idle)();
-               } else
-#endif
-               {
-                       while (!need_resched())
+                       else
                                cpu_relax();
                }
                schedule_preempt_disabled();
        }
 }
 
-#endif
-
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
index 741df916c124b10b12751da38bf7d3dac335c5dd..1303021748c8cc3c5c2ca109e2310e149dca0471 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/of_pdt.h>
 #include <asm/prom.h>
 #include <asm/oplib.h>
-#include <asm/leon.h>
 
 #include "prom.h"
 
index 7abc24e2bf1a9f5bed13d56cd0bc5e746a5e0d95..6c34de0c2abd40eefc679e35d548ff4c2a132586 100644 (file)
@@ -231,11 +231,14 @@ srmmu_rett_stackchk:
        cmp     %g1, %fp
        bleu    ret_trap_user_stack_is_bolixed
         mov    AC_M_SFSR, %g1
-       lda     [%g1] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%g1] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%g1] ASI_M_MMUREGS, %g0)
 
-       lda     [%g0] ASI_M_MMUREGS, %g1
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %g1)
        or      %g1, 0x2, %g1
-       sta     %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g0] ASI_M_MMUREGS)
 
        restore %g0, %g0, %g0
 
@@ -244,13 +247,16 @@ srmmu_rett_stackchk:
        save    %g0, %g0, %g0
 
        andn    %g1, 0x2, %g1
-       sta     %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g0] ASI_M_MMUREGS)
 
        mov     AC_M_SFAR, %g2
-       lda     [%g2] ASI_M_MMUREGS, %g2
+LEON_PI(lda    [%g2] ASI_LEON_MMUREGS, %g2)
+SUN_PI_(lda    [%g2] ASI_M_MMUREGS, %g2)
 
        mov     AC_M_SFSR, %g1
-       lda     [%g1] ASI_M_MMUREGS, %g1
+LEON_PI(lda    [%g1] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda    [%g1] ASI_M_MMUREGS, %g1)
        andcc   %g1, 0x2, %g0
        be      ret_trap_userwins_ok
         nop
index c052313f4dc578700193abaf5fc6e9b127e66e0a..efe3e64bba38bd80814d51958bdb046b9c8f8a68 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/cpu.h>
 #include <linux/kdebug.h>
 #include <linux/export.h>
+#include <linux/start_kernel.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -45,6 +46,7 @@
 #include <asm/cpudata.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 
 #include "kernel.h"
 
@@ -237,28 +239,42 @@ static void __init per_cpu_patch(void)
        }
 }
 
+struct leon_1insn_patch_entry {
+       unsigned int addr;
+       unsigned int insn;
+};
+
 enum sparc_cpu sparc_cpu_model;
 EXPORT_SYMBOL(sparc_cpu_model);
 
-struct tt_entry *sparc_ttable;
+static __init void leon_patch(void)
+{
+       struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
+       struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
 
-struct pt_regs fake_swapper_regs;
+       /* Default instruction is leon - no patching */
+       if (sparc_cpu_model == sparc_leon)
+               return;
 
-void __init setup_arch(char **cmdline_p)
-{
-       int i;
-       unsigned long highest_paddr;
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-       sparc_ttable = (struct tt_entry *) &trapbase;
+               *(unsigned int *)(addr) = start->insn;
+               flushi(addr);
 
-       /* Initialize PROM console and command line. */
-       *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
-       parse_early_param();
+               start++;
+       }
+}
 
-       boot_flags_init(*cmdline_p);
+struct tt_entry *sparc_ttable;
+struct pt_regs fake_swapper_regs;
 
-       register_console(&prom_early_console);
+/* Called from head_32.S - before we have setup anything
+ * in the kernel. Be very careful with what you do here.
+ */
+void __init sparc32_start_kernel(struct linux_romvec *rp)
+{
+       prom_init(rp);
 
        /* Set sparc_cpu_model */
        sparc_cpu_model = sun_unknown;
@@ -275,6 +291,26 @@ void __init setup_arch(char **cmdline_p)
        if (!strncmp(&cputypval[0], "leon" , 4))
                sparc_cpu_model = sparc_leon;
 
+       leon_patch();
+       start_kernel();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+       int i;
+       unsigned long highest_paddr;
+
+       sparc_ttable = (struct tt_entry *) &trapbase;
+
+       /* Initialize PROM console and command line. */
+       *cmdline_p = prom_getbootargs();
+       strcpy(boot_command_line, *cmdline_p);
+       parse_early_param();
+
+       boot_flags_init(*cmdline_p);
+
+       register_console(&prom_early_console);
+
        printk("ARCH: ");
        switch(sparc_cpu_model) {
        case sun4m:
index bb1513e45f1a811b05a07979995726b99eca1a94..a53e0a5fd3a3d944a5eb42391e2862c92a23c044 100644 (file)
@@ -32,8 +32,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
  */
@@ -274,7 +272,6 @@ void do_sigreturn32(struct pt_regs *regs)
                case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
                case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
        }
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 
@@ -376,7 +373,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
                case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
                case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        }
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -775,7 +771,7 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
                                  siginfo_t *info,
                                  sigset_t *oldset, struct pt_regs *regs)
 {
@@ -787,12 +783,9 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
                err = setup_frame32(ka, regs, signr, oldset);
 
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -841,14 +834,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart32(orig_i0, regs, &ka.sa);
-               if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal32(signr, &ka, &info, oldset, regs);
                return;
        }
        if (restart_syscall &&
@@ -872,10 +858,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
        /* If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 struct sigstack32 {
index 2b7e849f7c6528b1da91146f3f515ccbf21370c3..68f9c8650af47b2731c622cc8ada8768b63d90cb 100644 (file)
@@ -29,8 +29,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
@@ -130,7 +128,6 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
        if (err)
                goto segv_and_exit;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 
@@ -197,7 +194,6 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
                        goto segv;
        }
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -449,10 +445,11 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long signr, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+             siginfo_t *info, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int err;
 
        if (ka->sa.sa_flags & SA_SIGINFO)
@@ -461,12 +458,9 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
                err = setup_frame(ka, regs, signr, oldset);
 
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -498,7 +492,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 {
        struct k_sigaction ka;
        int restart_syscall;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -523,11 +516,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
                regs->u_regs[UREG_G6] = orig_i0;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        /* If the debugger messes with the program counter, it clears
@@ -544,15 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
        if (restart_syscall &&
@@ -576,22 +556,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        /* if there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
                      unsigned long thread_info_flags)
 {
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs, orig_i0);
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
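
For reference, a minimal sketch of the delivery pattern the signal hunks in this merge converge on (the sparc32 and compat hunks above and the sparc64, tile, um and unicore32 ones that follow). Only sigmask_to_save(), signal_delivered(), restore_saved_sigmask() and get_signal_to_deliver() are the real generic helpers these hunks call; the skeleton around them, including the hypothetical arch_setup_frame(), is illustrative only and not taken from any file in this diff.

/* Per-arch shape after the conversion -- illustrative sketch only. */
static void handle_signal(unsigned long sig, struct k_sigaction *ka,
			  siginfo_t *info, struct pt_regs *regs)
{
	/* saved_sigmask if the restore-sigmask flag is set, else current->blocked */
	sigset_t *oldset = sigmask_to_save();

	if (arch_setup_frame(ka, regs, sig, oldset))	/* hypothetical helper */
		return;

	/* Blocks the signal, clears the restore-sigmask flag and calls
	 * tracehook_signal_handler() -- the bookkeeping each arch used to
	 * open-code with block_sigmask() and TIF/TS_RESTORE_SIGMASK. */
	signal_delivered(sig, info, ka, regs, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	if (signr > 0) {
		handle_signal(signr, &ka, &info, regs);
		return;
	}
	/* Nothing delivered: put any saved sigmask back in one call. */
	restore_saved_sigmask();
}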
 
index eafaab486b2d24465a0911e46a08c35ce17d3d29..867de2f8189c32acf359fe5575cd139ede6b7048 100644 (file)
@@ -38,8 +38,6 @@
 #include "systbls.h"
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
 {
@@ -71,7 +69,6 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
                        if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
                                goto do_sigsegv;
                }
-               sigdelsetmask(&set, ~_BLOCKABLE);
                set_current_blocked(&set);
        }
        if (test_thread_flag(TIF_32BIT)) {
@@ -315,7 +312,6 @@ void do_rt_sigreturn(struct pt_regs *regs)
        /* Prevent syscall restart.  */
        pt_regs_clear_syscall(regs);
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -466,7 +462,7 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
                                siginfo_t *info,
                                sigset_t *oldset, struct pt_regs *regs)
 {
@@ -475,12 +471,9 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
        err = setup_rt_frame(ka, regs, signr, oldset,
                             (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -512,7 +505,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 {
        struct k_sigaction ka;
        int restart_syscall;
-       sigset_t *oldset;
+       sigset_t *oldset = sigmask_to_save();
        siginfo_t info;
        int signr;
        
@@ -538,11 +531,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
            (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
                regs->u_regs[UREG_G6] = orig_i0;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                extern void do_signal32(sigset_t *, struct pt_regs *);
@@ -563,14 +551,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }
        if (restart_syscall &&
@@ -594,10 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        /* If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
@@ -607,8 +585,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 3ee51f189a55297b0babeb1f54d0b40af97de6f8..275f74fd6f6a3f16fdd4e5fae291af2a364075c0 100644 (file)
@@ -580,16 +580,9 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr,    unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
 {
-       unsigned long ret = -EINVAL;
-
        if (test_thread_flag(TIF_32BIT))
-               goto out;
-
-       down_write(&current->mm->mmap_sem);
-       ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-       up_write(&current->mm->mmap_sem);
-out:
-       return ret;       
+               return -EINVAL;
+       return sys_mremap(addr, old_len, new_len, flags, new_addr);
 }
 
 /* we come to here via sys_nis_syscall so it can setup the regs argument */
index 7364ddc9e5aadd64317bc48c48aada5948b16727..af27acab44868a085ad2182049d81a6da61831e2 100644 (file)
@@ -149,8 +149,6 @@ sun4d_cpu_startup:
 
        b,a     smp_do_cpu_idle
 
-#ifdef CONFIG_SPARC_LEON
-
        __CPUINIT
        .align  4
         .global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@ leon_smp_cpu_startup:
         ld [%g1+4],%g1
         srl %g1,4,%g1
         set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
-       sta %g1, [%g5] ASI_M_MMUREGS
+       sta %g1, [%g5] ASI_LEON_MMUREGS
 
        /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
        set     (PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@ leon_smp_cpu_startup:
         nop
 
        b,a     smp_do_cpu_idle
-
-#endif
index c72fdf55e1c108435b6f4193b1c8377b441eef3b..3b05e6697710da1718be093e1f56dccaddc04e94 100644 (file)
@@ -2054,7 +2054,7 @@ void do_fpieee(struct pt_regs *regs)
        do_fpe_common(regs);
 }
 
-extern int do_mathemu(struct pt_regs *, struct fpustate *);
+extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
 
 void do_fpother(struct pt_regs *regs)
 {
@@ -2068,7 +2068,7 @@ void do_fpother(struct pt_regs *regs)
        switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
        case (2 << 14): /* unfinished_FPop */
        case (3 << 14): /* unimplemented_FPop */
-               ret = do_mathemu(regs, f);
+               ret = do_mathemu(regs, f, false);
                break;
        }
        if (ret)
@@ -2308,10 +2308,12 @@ void do_illegal_instruction(struct pt_regs *regs)
                        } else {
                                struct fpustate *f = FPUSTATE;
 
-                               /* XXX maybe verify XFSR bits like
-                                * XXX do_fpother() does?
+                               /* On UltraSPARC T2 and later, FPU insns which
+                                * are not implemented in HW signal an illegal
+                                * instruction trap and do not set the FP Trap
+                                * Trap in the %fsr to unimplemented_FPop.
                                 */
-                               if (do_mathemu(regs, f))
+                               if (do_mathemu(regs, f, true))
                                        return;
                        }
                }
index 5cffdc55f075a7c9229e40e6abfd97c44d211ed2..3e244f31e56b93c7f0e9487056d6b75d13951618 100644 (file)
@@ -443,7 +443,7 @@ static int __init vio_init(void)
        root_vdev = vio_create_one(hp, root, NULL);
        err = -ENODEV;
        if (!root_vdev) {
-               printk(KERN_ERR "VIO: Coult not create root device.\n");
+               printk(KERN_ERR "VIO: Could not create root device.\n");
                goto out_release;
        }
 
index 0e1605697b4905b030923be4d18ef2efa47fe423..89c2c29f154b4c45114df0dce93feee4ce8330af 100644 (file)
@@ -107,6 +107,11 @@ SECTIONS
                *(.sun4v_2insn_patch)
                __sun4v_2insn_patch_end = .;
        }
+       .leon_1insn_patch : {
+               __leon_1insn_patch = .;
+               *(.leon_1insn_patch)
+               __leon_1insn_patch_end = .;
+       }
        .swapper_tsb_phys_patch : {
                __swapper_tsb_phys_patch = .;
                *(.swapper_tsb_phys_patch)
index 4c2de3cf309b65ad77b9f92e0773b5bd2e1f9e8d..28a7bc69f82b1dcf2113547cd19697244843d7dd 100644 (file)
@@ -332,24 +332,30 @@ spwin_srmmu_stackchk:
         mov    AC_M_SFSR, %glob_tmp
 
        /* Clear the fault status and turn on the no_fault bit. */
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0          ! eat SFSR
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)      ! eat SFSR
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)         ! eat SFSR
 
-       lda     [%g0] ASI_M_MMUREGS, %glob_tmp          ! read MMU control
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %glob_tmp)      ! read MMU control
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %glob_tmp)         ! read MMU control
        or      %glob_tmp, 0x2, %glob_tmp               ! or in no_fault bit
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS          ! set it
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)      ! set it
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)         ! set it
 
        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)
 
        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)
 
        mov     AC_M_SFAR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)
 
        mov     AC_M_SFSR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
        andcc   %glob_tmp, 0x2, %g0                     ! did we fault?
        be,a    spwin_finish_up + 0x4                   ! cool beans, success
         restore %g0, %g0, %g0
index 9fde91a249e06b470008c155b8bc56f8b7de08e9..2c21cc59683e2ca28d9c316e47c8b0cda6055795 100644 (file)
@@ -254,16 +254,19 @@ srmmu_fwin_stackchk:
        mov     AC_M_SFSR, %l4
        cmp     %l5, %sp
        bleu    fwin_user_stack_is_bolixed
-        lda    [%l4] ASI_M_MMUREGS, %g0        ! clear fault status
+LEON_PI( lda   [%l4] ASI_LEON_MMUREGS, %g0)    ! clear fault status
+SUN_PI_( lda   [%l4] ASI_M_MMUREGS, %g0)       ! clear fault status
 
        /* The technique is, turn off faults on this processor,
         * just let the load rip, then check the sfsr to see if
         * a fault did occur.  Then we turn on fault traps again
         * and branch conditionally based upon what happened.
         */
-       lda     [%g0] ASI_M_MMUREGS, %l5        ! read mmu-ctrl reg
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %l5)    ! read mmu-ctrl reg
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %l5)       ! read mmu-ctrl reg
        or      %l5, 0x2, %l5                   ! turn on no-fault bit
-       sta     %l5, [%g0] ASI_M_MMUREGS        ! store it
+LEON_PI(sta    %l5, [%g0] ASI_LEON_MMUREGS)    ! store it
+SUN_PI_(sta    %l5, [%g0] ASI_M_MMUREGS)       ! store it
 
        /* Cross fingers and go for it. */
        LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@ srmmu_fwin_stackchk:
 
        /* LOCATION: Window 'T' */
 
-       lda     [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
-       andn    %twin_tmp1, 0x2, %twin_tmp1     ! clear no-fault bit
-       sta     %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %twin_tmp1)     ! load mmu-ctrl again
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %twin_tmp1)        ! load mmu-ctrl again
+       andn    %twin_tmp1, 0x2, %twin_tmp1             ! clear no-fault bit
+LEON_PI(sta    %twin_tmp1, [%g0] ASI_LEON_MMUREGS)     ! store it
+SUN_PI_(sta    %twin_tmp1, [%g0] ASI_M_MMUREGS)        ! store it
 
        mov     AC_M_SFAR, %twin_tmp2
-       lda     [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
+LEON_PI(lda    [%twin_tmp2] ASI_LEON_MMUREGS, %g0)     ! read fault address
+SUN_PI_(lda    [%twin_tmp2] ASI_M_MMUREGS, %g0)        ! read fault address
 
        mov     AC_M_SFSR, %twin_tmp2
-       lda     [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2  ! read fault status
-       andcc   %twin_tmp2, 0x2, %g0                    ! did fault occur?
+LEON_PI(lda    [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
+SUN_PI_(lda    [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2)    ! read fault status
+       andcc   %twin_tmp2, 0x2, %g0                       ! did fault occur?
 
-       bne     1f                                      ! yep, cleanup
+       bne     1f                                         ! yep, cleanup
         nop
 
        wr      %t_psr, 0x0, %psr
index 2bbe2f28ad23355edb2716828769061327c2f829..1704068da92806d5868d0374bea669f31f21b113 100644 (file)
@@ -163,7 +163,7 @@ typedef union {
        u64 q[2];
 } *argp;
 
-int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
 {
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
                        case FSQRTS: {
                                unsigned long x = current_thread_info()->xfsr[0];
 
-                               x = (x >> 14) & 0xf;
+                               x = (x >> 14) & 0x7;
                                TYPE(x,1,1,1,1,0,0);
                                break;
                        }
@@ -226,7 +226,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
                        case FSQRTD: {
                                unsigned long x = current_thread_info()->xfsr[0];
 
-                               x = (x >> 14) & 0xf;
+                               x = (x >> 14) & 0x7;
                                TYPE(x,2,1,2,1,0,0);
                                break;
                        }
@@ -357,9 +357,17 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
        if (type) {
                argp rs1 = NULL, rs2 = NULL, rd = NULL;
                
-               freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
-               if (freg != (type >> 9))
-                       goto err;
+               /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
+                * Type field in the %fsr to unimplemented_FPop.  Nor does it
+                * use the fp_exception_other trap.  Instead it signals an
+                * illegal instruction and leaves the FP trap type field of
+                * the %fsr unchanged.
+                */
+               if (!illegal_insn_trap) {
+                       int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
+                       if (ftt != (type >> 9))
+                               goto err;
+               }
                current_thread_info()->xfsr[0] &= ~0x1c000;
                freg = ((insn >> 14) & 0x1f);
                switch (type & 0x3) {
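
The 0xf -> 0x7 mask change in this hunk matches the width of the FP trap type field: in the SPARC V9 %fsr, ftt occupies bits 16:14 (three bits), while bit 17 already belongs to the ver field, so masking with 0xf after the 14-bit shift could pick up a stray ver bit. A hypothetical helper (not part of this diff) expressing the same extraction:

/* Hypothetical helper, not in this diff: the 3-bit FP trap type (ftt),
 * bits 16:14 of a SPARC V9 %fsr image. */
static inline unsigned int fsr_ftt(unsigned long fsr)
{
	return (fsr >> 14) & 0x7;	/* 0xf would also catch bit 17 (ver) */
}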
index 69ffd3112fed2053ae72f42c7a602a43d7378d13..30c3eccfdf5a209f8b09458824aa9d706f3079dd 100644 (file)
@@ -8,8 +8,9 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
-obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+obj-$(CONFIG_SPARC32)   += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
index 4c67ae6e50231990bcd40ce3c2170a2f7c070497..5bed085a2c17984a5c33fc5ea509005e3db679cb 100644 (file)
@@ -32,7 +32,7 @@ static inline unsigned long leon_get_ctable_ptr(void)
 }
 
 
-unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 {
 
        unsigned int ctxtbl;
index 256db6b22c54ed7cf2cc26412d5013930896703b..62e3f57733037d89d828d389bdefff93c0bc3f0b 100644 (file)
@@ -646,6 +646,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
        }
 }
 
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+       unsigned long retval;
+
+       if (sparc_cpu_model != sparc_leon) {
+
+               vaddr &= PAGE_MASK;
+               __asm__ __volatile__("lda [%1] %2, %0\n\t" :
+                                    "=r" (retval) :
+                                    "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+       } else {
+               retval = leon_swprobe(vaddr, 0);
+       }
+       return retval;
+}
+
 /*
  * This is much cleaner than poking around physical address space
  * looking at the prom's page table directly which is what most
@@ -665,7 +682,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        break; /* probably wrap around */
                if(start == 0xfef00000)
                        start = KADB_DEBUGGER_BEGVM;
-               if(!(prompte = srmmu_hwprobe(start))) {
+               if(!(prompte = srmmu_probe(start))) {
                        start += PAGE_SIZE;
                        continue;
                }
@@ -674,12 +691,12 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                what = 0;
     
                if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
-                       if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+                       if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
                                what = 1;
                }
     
                if(!(start & ~(SRMMU_PGDIR_MASK))) {
-                       if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+                       if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
                           prompte)
                                what = 2;
                }
@@ -1156,7 +1173,7 @@ static void turbosparc_flush_page_to_ram(unsigned long page)
 #ifdef TURBOSPARC_WRITEBACK
        volatile unsigned long clear;
 
-       if (srmmu_hwprobe(page))
+       if (srmmu_probe(page))
                turbosparc_flush_page_cache(page);
        clear = srmmu_get_fstatus();
 #endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644 (file)
index 0000000..d0a67b2
--- /dev/null
@@ -0,0 +1,82 @@
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta    %o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %o0, [%g0] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+       /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+       srl     %o0, 4, %g1
+       and     %g1, SRMMU_CTX_PMASK, %g1
+
+       mov     SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta    %g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g2] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+       mov     SRMMU_CTX_REG, %g1
+LEON_PI(sta    %o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %o0, [%g1] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+       mov     SRMMU_CTX_REG, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+       mov     SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+       mov     SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_faddr)
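
srmmu_access.S builds on the LEON_PI()/SUN_PI_() helpers and the .leon_1insn_patch output section added to the sparc32 linker script elsewhere in this merge. The idea, sketched below on the assumption that the real definitions live in arch/sparc/include/asm/asmmacro.h (the exact macro bodies are not part of this diff): the LEON flavour of the instruction is assembled in place as the default, while the SUN flavour plus the address it should overwrite is recorded in .leon_1insn_patch, so leon_patch() in setup_32.c can rewrite the kernel text early in sparc32_start_kernel() when it finds it is not running on a LEON.

/* Assumed shape of the helpers -- see asm/asmmacro.h for the real ones. */
#define LEON_PI(...)				\
662:	__VA_ARGS__

#define SUN_PI_(...)				\
	.section .leon_1insn_patch, "ax";	\
	.word 662b;				\
	__VA_ARGS__;				\
	.previous

Each pair therefore emits one { addr, insn } record into the section, matching the struct leon_1insn_patch_entry layout that the leon_patch() loop earlier in this merge walks and applies.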
index 69adc08d36a52541b1be754dd7c809824b2d707a..6e74450ff0a110afc32901e273d74a77c80f8ca4 100644 (file)
@@ -44,7 +44,6 @@ typedef __kernel_uid32_t __compat_gid32_t;
 typedef __kernel_mode_t compat_mode_t;
 typedef __kernel_dev_t compat_dev_t;
 typedef __kernel_loff_t compat_loff_t;
-typedef __kernel_nlink_t compat_nlink_t;
 typedef __kernel_ipc_pid_t compat_ipc_pid_t;
 typedef __kernel_daddr_t compat_daddr_t;
 typedef __kernel_fsid_t        compat_fsid_t;
index 656c486e64fafd424863fabe0f66d52a36e5c243..e9c670d7a7fe2e13c1edd65c410f840ba75f5e32 100644 (file)
@@ -91,11 +91,6 @@ extern void smp_nap(void);
 /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
 extern void _cpu_idle(void);
 
-/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
-extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
-                                 unsigned long new_sp,
-                                 unsigned long new_ss10);
-
 #else /* __ASSEMBLY__ */
 
 /*
@@ -166,7 +161,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index c3dd275f25e2f0304d4719358d42d692f046175b..9ab078a4605dd9750384b15b72b34c523b038fb5 100644 (file)
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #ifdef __tilegx__
 #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
 #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
-#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
 #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
 #else
 #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
index 9092ce8aa6b472b302ec31d514bb2da71fac3bfe..f8b74ca83b9257a245adaa6a03d05cdb75431c97 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <asm/byteorder.h>
 #include <asm/backtrace.h>
 #include <asm/tile-desc.h>
 #include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }
 
-               /* Decode the next bundle. */
-               bundle.bits = prefetched_bundles[next_bundle++];
+               /*
+                * Decode the next bundle.
+                * TILE always stores instruction bundles in little-endian
+                * mode, even when the chip is running in big-endian mode.
+                */
+               bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);
index cdef6e5ec022cdbe6ab4c1c139f56dffc7fbcd8e..474571b8408584d37743678ce02b2ca77c3c9938 100644 (file)
@@ -118,8 +118,6 @@ struct compat_rt_sigframe {
        struct compat_ucontext uc;
 };
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize)
@@ -302,7 +300,6 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
index 133c4b56a99ec7196c59f9dca60742953826bf75..c31637baff283698f776daf5a83c480dd00b26d6 100644 (file)
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
        jrp lr   /* keep backtracer happy */
        STD_ENDPROC(KBacktraceIterator_init_current)
 
-/*
- * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
- * free the old stack (passed in r0) and re-invoke cpu_idle().
- * We update sp and ksp0 simultaneously to avoid backtracer warnings.
- */
-STD_ENTRY(cpu_idle_on_new_stack)
-       {
-        move sp, r1
-        mtspr SPR_SYSTEM_SAVE_K_0, r2
-       }
-       jal free_thread_info
-       j cpu_idle
-       STD_ENDPROC(cpu_idle_on_new_stack)
-
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
        nap
index ba1023d8a0218ca3a33918cf3a256a592a77fb5f..6be7991505019a30ce3ff6268c275a931a088644 100644 (file)
@@ -565,8 +565,6 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
                return 1;
        }
        if (thread_info_flags & _TIF_SINGLESTEP) {
index 6098ccc59be2484a22a5f11c862569d9850c4248..dd87f342039064a0e545382acdf65321d0815ac1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/smp.h>
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
+#include <linux/start_kernel.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
index f79d4b88c747ae97db505a7f54143e610e37e24d..e29b0553211d611af9802dfe190f9d0d9d868f69 100644 (file)
@@ -37,8 +37,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
                stack_t __user *, uoss, struct pt_regs *, regs)
 {
@@ -96,7 +94,6 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -242,10 +239,11 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-                        struct k_sigaction *ka, sigset_t *oldset,
+static void handle_signal(unsigned long sig, siginfo_t *info,
+                        struct k_sigaction *ka,
                         struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -278,15 +276,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        else
 #endif
                ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       if (ret == 0) {
-               /* This code is only called from system calls or from
-                * the work_pending path in the return-to-user code, and
-                * either way we can re-enable interrupts unconditionally.
-                */
-               block_sigmask(ka, sig);
-       }
-
-       return ret;
+       if (ret)
+               return;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -299,7 +291,6 @@ void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * i386 will check if we're coming from kernel mode and bail out
@@ -308,24 +299,10 @@ void do_signal(struct pt_regs *regs)
         * helpful, we can reinstate the check on "!user_mode(regs)".
         */
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                goto done;
        }
 
@@ -350,10 +327,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* If there's no signal to deliver, just put the saved sigmask back. */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 
 done:
        /* Avoid double syscall restart if there are nested signals. */
index 88e466b159dcac92a13d81b5919b0a5487d63830..43b39d61b538698229a23f654b7fb44a335187b3 100644 (file)
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
-       rcu_switch_from(from);
        switch_to(from, to, from);
 }
 
index 76078490c2581c92a2c40cc039419e918f8b6f9d..e584e40ee8320f62e82e67abe6230a40ac75b19d 100644 (file)
@@ -6,9 +6,6 @@
 #ifndef __FRAME_KERN_H_
 #define __FRAME_KERN_H_
 
-#define _S(nr) (1<<((nr)-1))
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 
                                 struct k_sigaction *ka,
                                 struct pt_regs *regs, 
index 3a2235e0abc3e18dec7862208364bb8de2d6916c..ccb9a9d283f165760b20fec6bb3b821f814b0a37 100644 (file)
@@ -117,11 +117,8 @@ void interrupt_end(void)
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal();
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(&current->thread.regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
 
 void exit_thread(void)
index 4d93dff6b3713e321a66f49ef160d3da4380e7a2..3d15243ce69234c3ddbac961ef58b0282247a4e4 100644 (file)
@@ -4,7 +4,9 @@
  */
 
 #include "linux/sched.h"
+#include "linux/spinlock.h"
 #include "linux/slab.h"
+#include "linux/oom.h"
 #include "kern_util.h"
 #include "os.h"
 #include "skas.h"
@@ -22,13 +24,18 @@ static void kill_off_processes(void)
                struct task_struct *p;
                int pid;
 
+               read_lock(&tasklist_lock);
                for_each_process(p) {
-                       if (p->mm == NULL)
-                               continue;
+                       struct task_struct *t;
 
-                       pid = p->mm->context.id.u.pid;
+                       t = find_lock_task_mm(p);
+                       if (!t)
+                               continue;
+                       pid = t->mm->context.id.u.pid;
+                       task_unlock(t);
                        os_kill_ptraced_process(pid, 1);
                }
+               read_unlock(&tasklist_lock);
        }
 }
 
index 292e706016c521842b3654ea0e1de2619211dc60..7362d58efc29612c1ffaad64558b3a986b820515 100644 (file)
 EXPORT_SYMBOL(block_signals);
 EXPORT_SYMBOL(unblock_signals);
 
-#define _S(nr) (1<<((nr)-1))
-
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(struct pt_regs *regs, unsigned long signr,
-                        struct k_sigaction *ka, siginfo_t *info,
-                        sigset_t *oldset)
+static void handle_signal(struct pt_regs *regs, unsigned long signr,
+                        struct k_sigaction *ka, siginfo_t *info)
 {
+       sigset_t *oldset = sigmask_to_save();
        unsigned long sp;
        int err;
 
@@ -65,9 +61,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
        if (err)
                force_sigsegv(signr, current);
        else
-               block_sigmask(ka, signr);
-
-       return err;
+               signal_delivered(signr, info, ka, regs, 0);
 }
 
 static int kern_do_signal(struct pt_regs *regs)
@@ -77,24 +71,9 @@ static int kern_do_signal(struct pt_regs *regs)
        int sig, handled_sig = 0;
 
        while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
-               sigset_t *oldset;
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
                handled_sig = 1;
                /* Whee!  Actually deliver the signal.  */
-               if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
-                       /*
-                        * a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       break;
-               }
+               handle_signal(regs, sig, &ka_copy, &info);
        }
 
        /* Did we come from a system call? */
@@ -130,10 +109,8 @@ static int kern_do_signal(struct pt_regs *regs)
         * if there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       if (!handled_sig)
+               restore_saved_sigmask();
        return handled_sig;
 }
 
index dafc9471595021748eda5e750a80d3483a662379..3be60765c0e25d634282ea66e0902a9dc8d41bd4 100644 (file)
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                (is_write ? FAULT_FLAG_WRITE : 0);
 
        *code_out = SEGV_MAPERR;
 
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        if (in_atomic())
                goto out_nosemaphore;
 
+retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -65,7 +68,11 @@ good_area:
        do {
                int fault;
 
-               fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+               fault = handle_mm_fault(mm, vma, address, flags);
+
+               if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+                       goto out_nosemaphore;
+
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
@@ -75,10 +82,17 @@ good_area:
                        }
                        BUG();
                }
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
+               if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                       if (fault & VM_FAULT_MAJOR)
+                               current->maj_flt++;
+                       else
+                               current->min_flt++;
+                       if (fault & VM_FAULT_RETRY) {
+                               flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                               goto retry;
+                       }
+               }
 
                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
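
A condensed sketch of the retry contract the hunk above relies on (illustrative caller only, not code from this diff): when FAULT_FLAG_ALLOW_RETRY is set and the fault has to wait for I/O, handle_mm_fault() releases mmap_sem itself and returns VM_FAULT_RETRY; the caller re-takes mmap_sem, repeats the VMA lookup, and retries with the flag cleared so the second attempt cannot loop. Keeping the maj_flt/min_flt accounting under the FAULT_FLAG_ALLOW_RETRY check means a retried fault is counted once, on the pass that started the I/O.

/* Illustrative sketch under the assumptions above -- not a new API. */
static int fault_one_address(struct mm_struct *mm, unsigned long address,
			     int is_write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (is_write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);		/* redo after every retake */
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/* handle_mm_fault() already dropped mmap_sem for us. */
		if (fatal_signal_pending(current))
			return -EINTR;
		flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* at most one retry */
		goto retry;
	}

	up_read(&mm->mmap_sem);
	return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}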
index 7754df6ef7d45444dea14b6ebd93a035e54309d6..8adedb37720a0e997dea9ea88abff76113904830 100644 (file)
@@ -21,8 +21,6 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For UniCore syscalls, we encode the syscall number into the instruction.
  */
@@ -61,10 +59,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        int err;
 
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-       if (err == 0) {
-               sigdelsetmask(&set, ~_BLOCKABLE);
+       if (err == 0)
                set_current_blocked(&set);
-       }
 
        err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
        err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
@@ -312,13 +308,12 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset,
-             struct pt_regs *regs, int syscall)
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
+             siginfo_t *info, struct pt_regs *regs, int syscall)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
-       sigset_t blocked;
+       sigset_t *oldset = sigmask_to_save();
        int usig = sig;
        int ret;
 
@@ -364,15 +359,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 
        if (ret != 0) {
                force_sigsegv(sig, tsk);
-               return ret;
+               return;
        }
 
-       /*
-        * Block the signal if we were successful.
-        */
-       block_sigmask(ka, sig);
-
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -399,32 +389,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               sigset_t *oldset;
-
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
-                               == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs, syscall);
                return;
        }
 
- no_signal:
        /*
         * No signal to deliver to the process - restart the syscall.
         */
@@ -451,8 +421,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
        /* If there's no signal to deliver, we just put the saved
         * sigmask back.
         */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs,
@@ -464,8 +433,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
        if (thread_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index d700811785ea131aeeefb40026b9bd260886afa6..c70684f859e13473908a1370a9a3bb160db5c4e3 100644 (file)
@@ -1506,6 +1506,8 @@ config EFI_STUB
           This kernel feature allows a bzImage to be loaded directly
          by EFI firmware without the use of a bootloader.
 
+         See Documentation/x86/efi-stub.txt for more information.
+
 config SECCOMP
        def_bool y
        prompt "Enable seccomp to safely compute untrusted bytecode"
index 2c14e76bb4c71255ee9c1d9b29b171dd73a33d60..4e85f5f85837c17c041217c8240f90820e46921e 100644 (file)
 
 static efi_system_table_t *sys_table;
 
+static void efi_printk(char *str)
+{
+       char *s8;
+
+       for (s8 = str; *s8; s8++) {
+               struct efi_simple_text_output_protocol *out;
+               efi_char16_t ch[2] = { 0 };
+
+               ch[0] = *s8;
+               out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+
+               if (*s8 == '\n') {
+                       efi_char16_t nl[2] = { '\r', 0 };
+                       efi_call_phys2(out->output_string, out, nl);
+               }
+
+               efi_call_phys2(out->output_string, out, ch);
+       }
+}
+
 static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size,
                              unsigned long *desc_size)
 {
@@ -531,8 +551,10 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
                                EFI_LOADER_DATA,
                                nr_initrds * sizeof(*initrds),
                                &initrds);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for initrds\n");
                goto fail;
+       }
 
        str = (char *)(unsigned long)hdr->cmd_line_ptr;
        for (i = 0; i < nr_initrds; i++) {
@@ -575,32 +597,42 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
 
                        status = efi_call_phys3(boottime->handle_protocol,
                                        image->device_handle, &fs_proto, &io);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to handle fs_proto\n");
                                goto free_initrds;
+                       }
 
                        status = efi_call_phys2(io->open_volume, io, &fh);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to open volume\n");
                                goto free_initrds;
+                       }
                }
 
                status = efi_call_phys5(fh->open, fh, &h, filename_16,
                                        EFI_FILE_MODE_READ, (u64)0);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to open initrd file\n");
                        goto close_handles;
+               }
 
                initrd->handle = h;
 
                info_sz = 0;
                status = efi_call_phys4(h->get_info, h, &info_guid,
                                        &info_sz, NULL);
-               if (status != EFI_BUFFER_TOO_SMALL)
+               if (status != EFI_BUFFER_TOO_SMALL) {
+                       efi_printk("Failed to get initrd info size\n");
                        goto close_handles;
+               }
 
 grow:
                status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                        EFI_LOADER_DATA, info_sz, &info);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc mem for initrd info\n");
                        goto close_handles;
+               }
 
                status = efi_call_phys4(h->get_info, h, &info_guid,
                                        &info_sz, info);
@@ -612,8 +644,10 @@ grow:
                file_sz = info->file_size;
                efi_call_phys1(sys_table->boottime->free_pool, info);
 
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to get initrd info\n");
                        goto close_handles;
+               }
 
                initrd->size = file_sz;
                initrd_total += file_sz;
@@ -629,11 +663,14 @@ grow:
                 */
                status = high_alloc(initrd_total, 0x1000,
                                   &initrd_addr, hdr->initrd_addr_max);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc highmem for initrds\n");
                        goto close_handles;
+               }
 
                /* We've run out of free low memory. */
                if (initrd_addr > hdr->initrd_addr_max) {
+                       efi_printk("We've run out of free low memory\n");
                        status = EFI_INVALID_PARAMETER;
                        goto free_initrd_total;
                }
@@ -652,8 +689,10 @@ grow:
                                status = efi_call_phys3(fh->read,
                                                        initrds[j].handle,
                                                        &chunksize, addr);
-                               if (status != EFI_SUCCESS)
+                               if (status != EFI_SUCCESS) {
+                                       efi_printk("Failed to read initrd\n");
                                        goto free_initrd_total;
+                               }
                                addr += chunksize;
                                size -= chunksize;
                        }
@@ -674,7 +713,7 @@ free_initrd_total:
        low_free(initrd_total, initrd_addr);
 
 close_handles:
-       for (k = j; k < nr_initrds; k++)
+       for (k = j; k < i; k++)
                efi_call_phys1(fh->close, initrds[k].handle);
 free_initrds:
        efi_call_phys1(sys_table->boottime->free_pool, initrds);
@@ -732,8 +771,10 @@ static efi_status_t make_boot_params(struct boot_params *boot_params,
                        options_size++; /* NUL termination */
 
                        status = low_alloc(options_size, 1, &cmdline);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to alloc mem for cmdline\n");
                                goto fail;
+                       }
 
                        s1 = (u8 *)(unsigned long)cmdline;
                        s2 = (u16 *)options;
@@ -895,12 +936,16 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
 
        status = efi_call_phys3(sys_table->boottime->handle_protocol,
                                handle, &proto, (void *)&image);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
                goto fail;
+       }
 
        status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc lowmem for boot params\n");
                goto fail;
+       }
 
        memset(boot_params, 0x0, 0x4000);
 
@@ -933,8 +978,10 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
        if (status != EFI_SUCCESS) {
                status = low_alloc(hdr->init_size, hdr->kernel_alignment,
                                   &start);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc mem for kernel\n");
                        goto fail;
+               }
        }
 
        hdr->code32_start = (__u32)start;
@@ -945,19 +992,25 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                EFI_LOADER_DATA, sizeof(*gdt),
                                (void **)&gdt);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for gdt structure\n");
                goto fail;
+       }
 
        gdt->size = 0x800;
        status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for gdt\n");
                goto fail;
+       }
 
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                EFI_LOADER_DATA, sizeof(*idt),
                                (void **)&idt);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for idt structure\n");
                goto fail;
+       }
 
        idt->size = 0;
        idt->address = 0;
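
All of the error paths added above funnel through efi_printk(), which feeds the firmware's simple-text-output protocol one character at a time: the protocol expects NUL-terminated UCS-2 strings and, console-style, wants a carriage return before every line feed. A rough userspace sketch of the same widening-and-CRLF conversion (the helper name is invented for illustration and is not part of the stub):

#include <stdint.h>
#include <stdio.h>

/* Widen an ASCII string to UCS-2, inserting '\r' before each '\n',
 * the way efi_printk() does one character at a time. */
static size_t ascii_to_ucs2_crlf(const char *src, uint16_t *dst, size_t max)
{
        size_t n = 0;

        for (; *src && n + 2 < max; src++) {
                if (*src == '\n')
                        dst[n++] = '\r';
                dst[n++] = (uint16_t)(unsigned char)*src;
        }
        dst[n] = 0;                     /* terminator for output_string() */
        return n;
}

int main(void)
{
        uint16_t out[64];

        printf("%zu UCS-2 code units\n",
               ascii_to_ucs2_crlf("Failed to open volume\n", out, 64));
        return 0;
}
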
index 39251663e65b3fb64404d3cb67299b33313818f1..3b6e15627c55f0b1c8b5b4b5c9c1a37ffebf8060 100644 (file)
@@ -58,4 +58,10 @@ struct efi_uga_draw_protocol {
        void *blt;
 };
 
+struct efi_simple_text_output_protocol {
+       void *reset;
+       void *output_string;
+       void *test_string;
+};
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index 8bbea6aa40d9705b68e1dd9531b7c9c74d96a106..efe5acfc79c30c8ab2df32586618fa6f7c2fc88b 100644 (file)
@@ -94,10 +94,10 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct booting from floppy is no longer supported.\r\n"
-       .ascii  "Please use a boot loader program instead.\r\n"
+       .ascii  "Direct floppy boot is not supported. "
+       .ascii  "Use a boot loader program instead.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot . . .\r\n"
+       .ascii  "Remove disk and press any key to reboot ...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   2                               # nr_sections
+       .word   3                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
 #else
        .quad   0                               # ImageBase
 #endif
-       .long   0x1000                          # SectionAlignment
-       .long   0x200                           # FileAlignment
+       .long   0x20                            # SectionAlignment
+       .long   0x20                            # FileAlignment
        .word   0                               # MajorOperatingSystemVersion
        .word   0                               # MinorOperatingSystemVersion
        .word   0                               # MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
 
        # Section table
 section_table:
-       .ascii  ".text"
-       .byte   0
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".setup"
        .byte   0
        .byte   0
        .long   0
@@ -217,9 +219,8 @@ section_table:
 
        #
        # The EFI application loader requires a relocation section
-       # because EFI applications must be relocatable. But since
-       # we don't need the loader to fixup any relocs for us, we
-       # just create an empty (zero-length) .reloc section header.
+       # because EFI applications must be relocatable. The .reloc
+       # offset & size fields are filled in by build.c.
        #
        .ascii  ".reloc"
        .byte   0
@@ -233,6 +234,25 @@ section_table:
        .word   0                               # NumberOfRelocations
        .word   0                               # NumberOfLineNumbers
        .long   0x42100040                      # Characteristics (section flags)
+
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".text"
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0                             # startup_{32,64}
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0                             # startup_{32,64}
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0x60500020                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
index 3f61f6e2b46f3ece150f20b409c1e652f3dd301b..4b8e165ee5723643dcd89619a341c7559eafe2bd 100644 (file)
@@ -50,6 +50,8 @@ typedef unsigned int   u32;
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+#define PECOFF_RELOC_RESERVE 0x20
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
        die("Usage: build setup system [> image]");
 }
 
-int main(int argc, char ** argv)
-{
 #ifdef CONFIG_EFI_STUB
-       unsigned int file_sz, pe_header;
+
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       unsigned int pe_header;
+       unsigned short num_sections;
+       u8 *section;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+       num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+
+#ifdef CONFIG_X86_32
+       section = &buf[pe_header + 0xa8];
+#else
+       section = &buf[pe_header + 0xb8];
 #endif
+
+       while (num_sections > 0) {
+               if (strncmp((char*)section, section_name, 8) == 0) {
+                       /* section header size field */
+                       put_unaligned_le32(size, section + 0x8);
+
+                       /* section header vma field */
+                       put_unaligned_le32(offset, section + 0xc);
+
+                       /* section header 'size of initialised data' field */
+                       put_unaligned_le32(size, section + 0x10);
+
+                       /* section header 'file offset' field */
+                       put_unaligned_le32(offset, section + 0x14);
+
+                       break;
+               }
+               section += 0x28;
+               num_sections--;
+       }
+}
+
+static void update_pecoff_setup_and_reloc(unsigned int size)
+{
+       u32 setup_offset = 0x200;
+       u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
+       u32 setup_size = reloc_offset - setup_offset;
+
+       update_pecoff_section_header(".setup", setup_offset, setup_size);
+       update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+
+       /*
+        * Modify .reloc section contents with a single entry. The
+        * relocation is applied to offset 10 of the relocation section.
+        */
+       put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+       put_unaligned_le32(10, &buf[reloc_offset + 4]);
+}
+
+static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+{
+       unsigned int pe_header;
+       unsigned int text_sz = file_sz - text_start;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of image */
+       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+       /*
+        * Size of code: Subtract the size of the first sector (512 bytes)
+        * which includes the header.
+        */
+       put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
+
+#ifdef CONFIG_X86_32
+       /*
+        * Address of entry point.
+        *
+        * The EFI stub entry point is +16 bytes from the start of
+        * the .text section.
+        */
+       put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
+#else
+       /*
+        * Address of entry point. startup_32 is at the beginning and
+        * the 64-bit entry point (startup_64) is always 512 bytes
+        * after. The EFI stub entry point is 16 bytes after that, as
+        * the first instruction allows legacy loaders to jump over
+        * the EFI stub initialisation
+        */
+       put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
+#endif /* CONFIG_X86_32 */
+
+       update_pecoff_section_header(".text", text_start, text_sz);
+}
+
+#endif /* CONFIG_EFI_STUB */
+
+int main(int argc, char ** argv)
+{
        unsigned int i, sz, setup_sectors;
        int c;
        u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
                die("Boot block hasn't got boot flag (0xAA55)");
        fclose(file);
 
+#ifdef CONFIG_EFI_STUB
+       /* Reserve 0x20 bytes for .reloc section */
+       memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+       c += PECOFF_RELOC_RESERVE;
+#endif
+
        /* Pad unused space with zeros */
        setup_sectors = (c + 511) / 512;
        if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
        i = setup_sectors*512;
        memset(buf+c, 0, i-c);
 
+#ifdef CONFIG_EFI_STUB
+       update_pecoff_setup_and_reloc(i);
+#endif
+
        /* Set the default root device */
        put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
 
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
 #ifdef CONFIG_EFI_STUB
-       file_sz = sz + i + ((sys_size * 16) - sz);
-
-       pe_header = get_unaligned_le32(&buf[0x3c]);
-
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
-       /*
-        * Subtract the size of the first section (512 bytes) which
-        * includes the header and .reloc section. The remaining size
-        * is that of the .text section.
-        */
-       file_sz -= 512;
-
-       /* Size of code */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
-
-#ifdef CONFIG_X86_32
-       /*
-        * Address of entry point.
-        *
-        * The EFI stub entry point is +16 bytes from the start of
-        * the .text section.
-        */
-       put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
-
-       /* .text size */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
-
-       /* .text vma */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
-
-       /* .text size of initialised data */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
-
-       /* .text file offset */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
-#else
-       /*
-        * Address of entry point. startup_32 is at the beginning and
-        * the 64-bit entry point (startup_64) is always 512 bytes
-        * after. The EFI stub entry point is 16 bytes after that, as
-        * the first instruction allows legacy loaders to jump over
-        * the EFI stub initialisation
-        */
-       put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
-
-       /* .text size */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
-
-       /* .text vma */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
-
-       /* .text size of initialised data */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
-
-       /* .text file offset */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
-#endif /* CONFIG_X86_32 */
-#endif /* CONFIG_EFI_STUB */
+       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+#endif
 
        crc = partial_crc32(buf, i, crc);
        if (fwrite(buf, 1, i, stdout) != i)
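
With the header split into .setup, .reloc and .text, build.c now patches each section's VirtualSize, VirtualAddress, SizeOfRawData and PointerToRawData in place, and drops a minimal ten-byte base-relocation block (an 8-byte header plus room for a single no-op entry) into the reserved .reloc space so that PE loaders which insist on a relocation section are satisfied. A rough standalone checker, not part of the kernel build, that walks the resulting bzImage's section table using the same layout assumptions (PE header offset at 0x3c, NumberOfSections at PE+6, SizeOfOptionalHeader at PE+20, 40-byte section entries):

/* Sketch only: dump the section table of a bzImage built with
 * CONFIG_EFI_STUB. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t le32(const uint8_t *p)
{
        return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t le16(const uint8_t *p)
{
        return p[0] | p[1] << 8;
}

int main(int argc, char **argv)
{
        static uint8_t buf[1 << 16];    /* plenty for the setup sectors */
        FILE *f;
        uint32_t pe;
        uint16_t nsec, optsz;
        const uint8_t *sec;
        int i;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
                return 1;
        if (fread(buf, 1, sizeof(buf), f) < 0x40)
                return 1;

        pe    = le32(&buf[0x3c]);
        nsec  = le16(&buf[pe + 6]);
        optsz = le16(&buf[pe + 20]);    /* SizeOfOptionalHeader */
        sec   = &buf[pe + 24 + optsz];  /* first section header */

        for (i = 0; i < nsec; i++, sec += 40) {
                char name[9] = "";

                memcpy(name, sec, 8);
                printf("%-8s vsize=%#x vma=%#x rawsize=%#x rawoff=%#x\n",
                       name, le32(sec + 8), le32(sec + 12),
                       le32(sec + 16), le32(sec + 20));
        }
        return 0;
}
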
index be6d9e365a800a8569fb4915fccbfc50b0793a09..3470624d7835fa646b7c9f8dafa6acd3d9ed5f2f 100644 (file)
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
        pxor IN3, STATE4
        movaps IN4, IV
 #else
-       pxor (INP), STATE2
-       pxor 0x10(INP), STATE3
        pxor IN1, STATE4
        movaps IN2, IV
+       movups (INP), IN1
+       pxor IN1, STATE2
+       movups 0x10(INP), IN2
+       pxor IN2, STATE3
 #endif
        movups STATE1, (OUTP)
        movups STATE2, 0x10(OUTP)
index 98bd70faccc50cb0bb07ad96066a74a990870239..673ac9b63d6bf51ca36e2a0f18c4ca0d125d4fde 100644 (file)
@@ -38,7 +38,7 @@
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
        int err = 0;
-       bool ia32 = is_ia32_task();
+       bool ia32 = test_thread_flag(TIF_IA32);
 
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
@@ -273,7 +273,6 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
@@ -299,7 +298,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
index 724aa441de7d0b6a0473c5cbe136ea756ed535ff..0c44630d17893e74a2672cf9e896c0a41b762c42 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
+#include <asm/realmode.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -116,10 +117,8 @@ static inline void acpi_disable_pci(void)
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern const unsigned char acpi_wakeup_code[];
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
 
 /*
  * Check if the CPU can handle C2 and deeper
index b97596e2b68c7ea3f62eebb38cd1f155719c150e..a6983b2772201c6109060ea25d983499f0281531 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#define BIT_64(n)                      (U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
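
BIT_64() exists because plain BIT() shifts an unsigned long, which is only 32 bits wide on i386, so BIT(62) is undefined there; the mce.c hunk further down in this diff moves its bit-62 CntP mask over to the new macro. A trivial stand-alone illustration (BIT_32UL is a stand-in for what BIT() expands to on a 32-bit build):

#include <stdio.h>

#define BIT_64(n)       (1ULL << (n))   /* equivalent to the U64_C() form above */
#define BIT_32UL(n)     (1UL << (n))    /* models BIT() on a 32-bit build */

int main(void)
{
        printf("BIT_64(62) = %#llx\n", (unsigned long long)BIT_64(62));
        /* On i386, BIT_32UL(62) would shift past the width of unsigned
         * long and is undefined behaviour, hence the new macro. */
        return 0;
}
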
index 340ee49961a61061862f892526d70a2f183b314c..f91e80f4f180bdba4e123835a99d4f3a5fc93973 100644 (file)
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM     (7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE  (7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
index 18d9005d9e4f014a8b0cb5739dce08dbc0f82d6d..b0767bc08740594380b6bbc8d734984b54522be4 100644 (file)
@@ -34,7 +34,7 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
-extern int modifying_ftrace_code;
+extern atomic_t modifying_ftrace_code;
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
index 0e3793b821ef4027c88f60cd2f7407277d034325..dc580c42851c223a1f21142bbb3a0b8983e18975 100644 (file)
@@ -54,6 +54,20 @@ struct nmiaction {
        __register_nmi_handler((t), &fn##_na);  \
 })
 
+/*
+ * For special handlers that register/unregister in the
+ * init section only.  This should be considered rare.
+ */
+#define register_nmi_handler_initonly(t, fn, fg, n)            \
+({                                                     \
+       static struct nmiaction fn##_na __initdata = {          \
+               .handler = (fn),                        \
+               .name = (n),                            \
+               .flags = (fg),                          \
+       };                                              \
+       __register_nmi_handler((t), &fn##_na);  \
+})
+
 int __register_nmi_handler(unsigned int, struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
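
register_nmi_handler_initonly() mirrors register_nmi_handler() but marks the struct nmiaction __initdata, so it is only safe for handlers that are registered and unregistered again from __init code. A hedged kernel-context usage sketch (the handler, its name string and the initcall are invented for illustration):

static int __init probe_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        /* ... decide whether this NMI was ours ... */
        return NMI_HANDLED;
}

static int __init probe_something(void)
{
        register_nmi_handler_initonly(NMI_LOCAL, probe_nmi_handler, 0, "probe");
        /* ... trigger the NMI and wait for it ... */
        unregister_nmi_handler(NMI_LOCAL, "probe");
        return 0;
}
early_initcall(probe_something);
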
index 43876f16caf1ca8981288089d57b362e017bafbf..cb00ccc7d571f2b5245f935bde849c5c5e289635 100644 (file)
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
  * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
  * operations.
  *
- * Without THP if the mmap_sem is hold for reading, the
- * pmd can only transition from null to not null while pmd_read_atomic runs.
- * So there's no need of literally reading it atomically.
+ * Without THP, if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
- * THP or null or point to a pte (and in turn become "stable") at any
- * time under pmd_read_atomic, so it's mandatory to read it atomically
- * with cmpxchg8b.
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with an atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
  */
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 {
        pmdval_t ret;
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 
        return (pmd_t) { ret };
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
-       return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
index 99f262e04b91b6d553fd65bd61957bd9cb5cbd15..8e525059e7d81c0a4cd46dfa2f62695daba80fee 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index ada93b3b8c66fb79f5e3a9fe91d9f15d67234edf..beff97f7df3790d04dfba1906fe657b1d024e579 100644 (file)
@@ -7,8 +7,6 @@
 
 #include <asm/processor-flags.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #define __FIX_EFLAGS   (X86_EFLAGS_AC | X86_EFLAGS_OF | \
                         X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
                         X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
index 5c25de07cba82fca1fbd027a3038f5f6cedf66ec..89f794f007ec1e4aa5bbd029bcb32182fffe1f48 100644 (file)
@@ -248,7 +248,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 
 static inline bool is_ia32_task(void)
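
These TS_RESTORE_SIGMASK accessors back the generic restore_saved_sigmask() helper that the arm signal.c hunk earlier in this diff now calls; roughly, the generic side reduces to the sketch below (a paraphrase, see the generic signal headers for the real definition):

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}

The WARN_ON() added to set_restore_sigmask() encodes the new rule that the flag may only be set while TIF_SIGPENDING is already pending, since signal delivery is what consumes it.
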
index 04cd6882308e5d06b685bea5ef76ff1f863d67da..e1f3a17034fce61ac64e2ff54640fcc31ec0f582 100644 (file)
@@ -33,9 +33,8 @@
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
 #define user_addr_max() (current_thread_info()->addr_limit.seg)
-#define __addr_ok(addr)                                        \
-       ((unsigned long __force)(addr) <                \
-        (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr)        \
+       ((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
  */
 
-#define __range_not_ok(addr, size)                                     \
+#define __range_not_ok(addr, size, limit)                              \
 ({                                                                     \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"             \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
-             "rm" (current_thread_info()->addr_limit.seg));            \
+             "rm" (limit));                                            \
        flag;                                                           \
 })
 
@@ -77,7 +76,8 @@
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+       (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
 
 /*
  * The exception table consists of pairs of addresses relative to the
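
__range_not_ok() now takes the limit as an explicit argument so callers such as perf's user-stack walker later in this diff can check against TASK_SIZE rather than the current addr_limit. The add/sbb/cmp/sbb sequence evaluates, in one pass, "addr + size wrapped around, or ended above limit"; a portable C rendition of the same predicate, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Portable rendition of the __range_not_ok() predicate: true if
 * addr + size wraps or exceeds limit. */
static bool range_not_ok(uintptr_t addr, uintptr_t size, uintptr_t limit)
{
        uintptr_t end = addr + size;

        if (end < addr)         /* the carry the asm catches with sbb */
                return true;
        return end > limit;
}
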
index becf47b81735ef6731e0271d0de7c0d939a389a9..6149b476d9dffe06bcd1e3e3136bc335fd3dbf98 100644 (file)
 /* 4 bits of software ack period */
 #define UV2_ACK_MASK                   0x7UL
 #define UV2_ACK_UNITS_SHFT             3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 
 /*
index 8afb693198155c705c3e4bb42e1ba12a69427159..b2297e58c6ed27c418fe9d5e9f00bf1e0923c833 100644 (file)
@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+       if (intsrc->source_irq == 0) {
                if (acpi_skip_timer_override) {
-                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       printk(PREFIX "BIOS IRQ0 override ignored.\n");
                        return 0;
                }
-               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+               if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+                       && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
                }
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-       /*
-        * The ati_ixp4x0_rev() early PCI quirk should have set
-        * the acpi_skip_timer_override flag already:
-        */
        if (!acpi_skip_timer_override) {
-               WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-               pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+               pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
                        d->ident);
                acpi_skip_timer_override = 1;
        }
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
         * is enabled.  This input is incorrectly designated the
         * ISA IRQ 0 via an interrupt source override even though
         * it is wired to the output of the master 8259A and INTIN0
-        * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+        * is not connected at all.  Force ignoring BIOS IRQ0
         * override in that cases.
         */
        {
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
                     },
         },
+       {
+        .callback = dmi_ignore_irq0_timer_override,
+        .ident = "FUJITSU SIEMENS",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+                    },
+        },
        {}
 };
 
index 6e76c191a83572c07bf86c9bbc7c129c4bbc912c..d5fd66f0d4cd01ea1420180af7a1ac0be2313045 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
-#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
                return 0;
        }
        memblock_reserve(addr, aper_size);
-       /*
-        * Kmemleak should not scan this block as it may not be mapped via the
-        * kernel direct mapping.
-        */
-       kmemleak_ignore(phys_to_virt(addr));
        printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
                        aper_size >> 10, addr);
        insert_aperture_resource((u32)addr, aper_size);
index ac96561d1a99ef499d8791ee736bdd85221ba4a9..5f0ff597437c17add5334629458f8c2b35ffa148 100644 (file)
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
        BUG_ON(!cfg->vector);
 
        vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+       for_each_cpu(cpu, cfg->domain)
                per_cpu(vector_irq, cpu)[vector] = -1;
 
        cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 
        if (likely(!cfg->move_in_progress))
                return;
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+       for_each_cpu(cpu, cfg->old_domain) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                                                                vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
index 82f29e70d05833b7b3b6e0b7789a46ef9a75c4b8..6b9333b429ba1910637bf29c63bbe0712064022e 100644 (file)
@@ -1101,14 +1101,20 @@ int is_debug_stack(unsigned long addr)
                 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
 }
 
+static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
+
 void debug_stack_set_zero(void)
 {
+       this_cpu_inc(debug_stack_use_ctr);
        load_idt((const struct desc_ptr *)&nmi_idt_descr);
 }
 
 void debug_stack_reset(void)
 {
-       load_idt((const struct desc_ptr *)&idt_descr);
+       if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
+               return;
+       if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
+               load_idt((const struct desc_ptr *)&idt_descr);
 }
 
 #else  /* CONFIG_X86_64 */
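
debug_stack_set_zero()/debug_stack_reset() can now nest (the TRACE_IRQS_*_DEBUG macros added to entry_64.S later in this diff wrap them around the existing TRACE_IRQS hooks), so reset only restores the normal IDT once the per-CPU counter drops back to zero, and warns on underflow. The counting rule, reduced to a plain C sketch with a single counter standing in for the per-CPU variable:

static unsigned int use_ctr;    /* stands in for debug_stack_use_ctr */

static void set_zero(void)
{
        use_ctr++;
        /* load_idt(&nmi_idt_descr): special IDT while nested */
}

static void reset(void)
{
        if (use_ctr == 0)       /* WARN_ON() in the real code */
                return;
        use_ctr--;
        if (use_ctr == 0) {
                /* load_idt(&idt_descr): back to the normal IDT */
        }
}
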
index b772dd6ad45016e5e943ec37cafdef5fac03a152..da27c5d2168a74c29e3d1da16d108ef7017113c8 100644 (file)
@@ -1251,15 +1251,15 @@ void mce_log_therm_throt_event(__u64 status)
  * poller finds an MCE, poll 2x faster.  When the poller finds no more
  * errors, poll 2x slower (up to check_interval seconds).
  */
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mce_start_timer(unsigned long data)
+static void mce_timer_fn(unsigned long data)
 {
-       struct timer_list *t = &per_cpu(mce_timer, data);
-       int *n;
+       struct timer_list *t = &__get_cpu_var(mce_timer);
+       unsigned long iv;
 
        WARN_ON(smp_processor_id() != data);
 
@@ -1272,13 +1272,14 @@ static void mce_start_timer(unsigned long data)
         * Alert userspace if needed.  If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
-       n = &__get_cpu_var(mce_next_interval);
+       iv = __this_cpu_read(mce_next_interval);
        if (mce_notify_irq())
-               *n = max(*n/2, HZ/100);
+               iv = max(iv / 2, (unsigned long) HZ/100);
        else
-               *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+               iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+       __this_cpu_write(mce_next_interval, iv);
 
-       t->expires = jiffies + *n;
+       t->expires = jiffies + iv;
        add_timer_on(t, smp_processor_id());
 }
 
@@ -1472,9 +1473,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
                                 rdmsrl(msrs[i], val);
 
                                 /* CntP bit set? */
-                                if (val & BIT(62)) {
-                                        val &= ~BIT(62);
-                                        wrmsrl(msrs[i], val);
+                                if (val & BIT_64(62)) {
+                                       val &= ~BIT_64(62);
+                                       wrmsrl(msrs[i], val);
                                 }
                         }
 
@@ -1556,17 +1557,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
        struct timer_list *t = &__get_cpu_var(mce_timer);
-       int *n = &__get_cpu_var(mce_next_interval);
+       unsigned long iv = check_interval * HZ;
 
-       setup_timer(t, mce_start_timer, smp_processor_id());
+       setup_timer(t, mce_timer_fn, smp_processor_id());
 
        if (mce_ignore_ce)
                return;
 
-       *n = check_interval * HZ;
-       if (!*n)
+       __this_cpu_write(mce_next_interval, iv);
+       if (!iv)
                return;
-       t->expires = round_jiffies(jiffies + *n);
+       t->expires = round_jiffies(jiffies + iv);
        add_timer_on(t, smp_processor_id());
 }
 
@@ -2276,7 +2277,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_DOWN_FAILED_FROZEN:
                if (!mce_ignore_ce && check_interval) {
                        t->expires = round_jiffies(jiffies +
-                                          __get_cpu_var(mce_next_interval));
+                                       per_cpu(mce_next_interval, cpu));
                        add_timer_on(t, cpu);
                }
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
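
mce_timer_fn() now keeps the polling interval per CPU in unsigned long jiffies: a pass that logged an event halves the interval (no lower than HZ/100), a quiet pass doubles it (no higher than check_interval seconds). The adaptation rule on its own, with stand-in values for HZ and check_interval and the round_jiffies_relative() clamp left out:

#include <stdio.h>

#define HZ              1000UL          /* stand-in, not the kernel's value */
#define CHECK_INTERVAL  (5UL * 60UL)    /* seconds, matching the default */

static unsigned long next_interval(unsigned long iv, int logged_event)
{
        if (logged_event)
                return iv / 2 > HZ / 100 ? iv / 2 : HZ / 100;
        return iv * 2 < CHECK_INTERVAL * HZ ? iv * 2 : CHECK_INTERVAL * HZ;
}

int main(void)
{
        unsigned long iv = CHECK_INTERVAL * HZ;

        iv = next_interval(iv, 1);      /* an MCE was logged: poll 2x faster */
        iv = next_interval(iv, 0);      /* quiet again: back off 2x */
        printf("interval = %lu jiffies\n", iv);
        return 0;
}
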
index dfea390e16085225ffe82ffbdca3cf641d79ce90..c7b3fe2d72e0f71b94577fde23622a8797eca941 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
 #
 # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
 #
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
 print OUT "#include <asm/cpufeature.h>\n\n";
 print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
 
+%features = ();
+$err = 0;
+
 while (defined($line = <IN>)) {
        if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
                $macro = $1;
-               $feature = $2;
+               $feature = "\L$2";
                $tail = $3;
                if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
-                       $feature = $1;
+                       $feature = "\L$1";
                }
 
-               if ($feature ne '') {
-                       printf OUT "\t%-32s = \"%s\",\n",
-                               "[$macro]", "\L$feature";
+               next if ($feature eq '');
+
+               if ($features{$feature}++) {
+                       print STDERR "$in: duplicate feature name: $feature\n";
+                       $err++;
                }
+               printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
        }
 }
 print OUT "};\n";
 
 close(IN);
 close(OUT);
+
+if ($err) {
+       unlink($out);
+       exit(1);
+}
+
+exit(0);
index ac140c7be396b6f55c97521ddcd572df664e95b6..bdda2e6c673bf71afb30ed670071a9d02dbdcbfa 100644 (file)
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
                if (align > max_align)
                        align = max_align;
 
-               sizek = 1 << align;
+               sizek = 1UL << align;
                if (debug_print) {
                        char start_factor = 'K', size_factor = 'K';
                        unsigned long start_base, size_base;
index e049d6da01832cfc91b5e2a45b922f592c7bb7ae..c4706cf9c011d8fd068d205ed0b669142c112400 100644 (file)
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
                if (!cpuc->shared_regs)
                        goto error;
        }
+       cpuc->is_fake = 1;
        return cpuc;
 error:
        free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
        dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+       return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if (bytes != sizeof(frame))
                        break;
 
-               if (fp < compat_ptr(regs->sp))
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                if (bytes != sizeof(frame))
                        break;
 
-               if ((unsigned long)fp < regs->sp)
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                perf_callchain_store(entry, frame.return_address);
index 6638aaf5449302c2ea2d5d03073f11bbcaf5b6ba..7241e2fc3c17c87b766c35f8d412b8692436b825 100644 (file)
@@ -117,6 +117,7 @@ struct cpu_hw_events {
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
        unsigned int            group_flag;
+       int                     is_fake;
 
        /*
         * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
+       void            (*pebs_aliases)(struct perf_event *event);
 
        /*
         * Intel LBR
index 166546ec6aefe523a20fc5b4206d0ef9a87e4679..187c294bc6583424e8613df62a883740ab8fca1a 100644 (file)
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
        return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
        if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-               return false;
+               return idx;
 
-       if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
-               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-               event->hw.config |= 0x01bb;
-               event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
-               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-       } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+       if (idx == EXTRA_REG_RSP_0)
+               return EXTRA_REG_RSP_1;
+
+       if (idx == EXTRA_REG_RSP_1)
+               return EXTRA_REG_RSP_0;
+
+       return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+       event->hw.extra_reg.idx = idx;
+
+       if (idx == EXTRA_REG_RSP_0) {
                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
                event->hw.config |= 0x01b7;
-               event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+       } else if (idx == EXTRA_REG_RSP_1) {
+               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+               event->hw.config |= 0x01bb;
+               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
        }
-
-       if (event->hw.extra_reg.idx == orig_idx)
-               return false;
-
-       return true;
 }
 
 /*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
        struct event_constraint *c = &emptyconstraint;
        struct er_account *era;
        unsigned long flags;
-       int orig_idx = reg->idx;
+       int idx = reg->idx;
 
-       /* already allocated shared msr */
-       if (reg->alloc)
+       /*
+        * reg->alloc can be set due to existing state, so for fake cpuc we
+        * need to ignore this, otherwise we might fail to allocate proper fake
+        * state for this extra reg constraint. Also see the comment below.
+        */
+       if (reg->alloc && !cpuc->is_fake)
                return NULL; /* call x86_get_event_constraint() */
 
 again:
-       era = &cpuc->shared_regs->regs[reg->idx];
+       era = &cpuc->shared_regs->regs[idx];
        /*
         * we use spin_lock_irqsave() to avoid lockdep issues when
         * passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
 
        if (!atomic_read(&era->ref) || era->config == reg->config) {
 
+               /*
+                * If its a fake cpuc -- as per validate_{group,event}() we
+                * shouldn't touch event state and we can avoid doing so
+                * since both will only call get_event_constraints() once
+                * on each event, this avoids the need for reg->alloc.
+                *
+                * Not doing the ER fixup will only result in era->reg being
+                * wrong, but since we won't actually try and program hardware
+                * this isn't a problem either.
+                */
+               if (!cpuc->is_fake) {
+                       if (idx != reg->idx)
+                               intel_fixup_er(event, idx);
+
+                       /*
+                        * x86_schedule_events() can call get_event_constraints()
+                        * multiple times on events in the case of incremental
+                        * scheduling(). reg->alloc ensures we only do the ER
+                        * allocation once.
+                        */
+                       reg->alloc = 1;
+               }
+
                /* lock in msr value */
                era->config = reg->config;
                era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
                /* one more user */
                atomic_inc(&era->ref);
 
-               /* no need to reallocate during incremental event scheduling */
-               reg->alloc = 1;
-
                /*
                 * need to call x86_get_event_constraint()
                 * to check if associated event has constraints
                 */
                c = NULL;
-       } else if (intel_try_alt_er(event, orig_idx)) {
-               raw_spin_unlock_irqrestore(&era->lock, flags);
-               goto again;
+       } else {
+               idx = intel_alt_er(idx);
+               if (idx != reg->idx) {
+                       raw_spin_unlock_irqrestore(&era->lock, flags);
+                       goto again;
+               }
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
 
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
        struct er_account *era;
 
        /*
-        * only put constraint if extra reg was actually
-        * allocated. Also takes care of event which do
-        * not use an extra shared reg
+        * Only put constraint if extra reg was actually allocated. Also takes
+        * care of events which do not use an extra shared reg.
+        *
+        * Also, if this is a fake cpuc we shouldn't touch any event state
+        * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+        * either since it'll be thrown out.
         */
-       if (!reg->alloc)
+       if (!reg->alloc || cpuc->is_fake)
                return;
 
        era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
        intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-       int ret = x86_pmu_hw_config(event);
-
-       if (ret)
-               return ret;
-
-       if (event->attr.precise_ip &&
-           (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+       if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
                /*
                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
                 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
                 */
                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+               alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+               event->hw.config = alt_config;
+       }
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+       if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+               /*
+                * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+                * (0x003c) so that we can use it with PEBS.
+                *
+                * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+                * PEBS capable. However we can use UOPS_RETIRED.ALL
+                * (0x01c2), which is a PEBS capable event, to get the same
+                * count.
+                *
+                * UOPS_RETIRED.ALL counts the number of cycles that retires
+                * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+                * larger than the maximum number of micro-ops that can be
+                * retired per cycle (4) and then inverting the condition, we
+                * count all cycles that retire 16 or less micro-ops, which
+                * is every cycle.
+                *
+                * Thereby we gain a PEBS capable cycle counter.
+                */
+               u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
 
                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
                event->hw.config = alt_config;
        }
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+       int ret = x86_pmu_hw_config(event);
+
+       if (ret)
+               return ret;
+
+       if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+               x86_pmu.pebs_aliases(event);
 
        if (intel_pmu_needs_lbr_smpl(event)) {
                ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .put_event_constraints  = intel_put_event_constraints,
+       .pebs_aliases           = intel_pebs_aliases_core2,
 
        .format_attrs           = intel_arch3_formats_attr,
 
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
-               x86_add_quirk(intel_sandybridge_quirk);
        case 45: /* SandyBridge, "Romely-EP" */
+               x86_add_quirk(intel_sandybridge_quirk);
+       case 58: /* IvyBridge */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+               x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
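
intel_pebs_aliases_snb() plays the same trick as the core2 variant above but lands on UOPS_RETIRED.ALL instead of the 0xc0-based event: with cmask=16 and inv set it counts every cycle, giving a PEBS-capable stand-in for CPU_CLK_UNHALTED.THREAD_P. Spelling the two alias encodings out with the architectural PERFEVTSEL bit layout (event select in bits 0-7, umask in 8-15, inv at bit 23, counter mask in 24-31); these are the values that end up in event->hw.config when a precise cycles event is requested:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the field placement X86_CONFIG() relies on. */
static uint64_t x86_config(uint64_t event, uint64_t umask,
                           uint64_t inv, uint64_t cmask)
{
        return (event & 0xff) | ((umask & 0xff) << 8) |
               ((inv & 1) << 23) | ((cmask & 0xff) << 24);
}

int main(void)
{
        /* SNB alias: UOPS_RETIRED.ALL, cmask=16, inverted */
        printf("snb alias   = %#llx\n",
               (unsigned long long)x86_config(0xc2, 0x01, 1, 16));
        /* core2 alias: the 0xc0 encoding used further up in this file */
        printf("core2 alias = %#llx\n",
               (unsigned long long)x86_config(0xc0, 0x00, 1, 16));
        return 0;
}
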
index 5a3edc27f6e5754f758e5ff166f89b34d6d99a01..35e2192df9f4ae078ae8ecff742f72f30881816b 100644 (file)
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
-       INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
index addf9e82a7f23bf063d4b6205307ad5f0dc5e7e7..ee8e9abc859f8a20a695c69c9d834743ff933036 100644 (file)
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
+               { X86_FEATURE_DTHERM,           CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
index 01ccf9b71473ce18ffe0cccf5fa09fd379dcf4cf..623f288374763286ec9e58aa66ecabb4e3fc2f6b 100644 (file)
@@ -316,7 +316,6 @@ ret_from_exception:
        preempt_stop(CLBR_ANY)
 ret_from_intr:
        GET_THREAD_INFO(%ebp)
-resume_userspace_sig:
 #ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@ work_notifysig:                            # deal with pending signals and
                                        # vm86-space
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
+       movb PT_CS(%esp), %bl
+       andb $SEGMENT_RPL_MASK, %bl
+       cmpb $USER_RPL, %bl
+       jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
-       jmp resume_userspace_sig
+       jmp resume_userspace
 
        ALIGN
 work_notifysig_v86:
@@ -630,9 +633,13 @@ work_notifysig_v86:
 #endif
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
+       movb PT_CS(%esp), %bl
+       andb $SEGMENT_RPL_MASK, %bl
+       cmpb $USER_RPL, %bl
+       jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
-       jmp resume_userspace_sig
+       jmp resume_userspace
 END(work_pending)
 
        # perform syscall exit tracing
index 320852d02026171d537b58bd95868113fa458d10..7d65133b51bede19fc529fd82691de2c01926f60 100644 (file)
@@ -190,6 +190,44 @@ ENDPROC(native_usergs_sysret64)
 #endif
 .endm
 
+/*
+ * When dynamic function tracer is enabled it will add a breakpoint
+ * to all locations that it is about to modify, sync CPUs, update
+ * all the code, sync CPUs, then remove the breakpoints. In this time
+ * if lockdep is enabled, it might jump back into the debug handler
+ * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
+ *
+ * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
+ * make sure the stack pointer does not get reset back to the top
+ * of the debug stack, and instead just reuses the current stack.
+ */
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
+
+.macro TRACE_IRQS_OFF_DEBUG
+       call debug_stack_set_zero
+       TRACE_IRQS_OFF
+       call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_ON_DEBUG
+       call debug_stack_set_zero
+       TRACE_IRQS_ON
+       call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
+       bt   $9,EFLAGS-\offset(%rsp)    /* interrupts off? */
+       jnc  1f
+       TRACE_IRQS_ON_DEBUG
+1:
+.endm
+
+#else
+# define TRACE_IRQS_OFF_DEBUG          TRACE_IRQS_OFF
+# define TRACE_IRQS_ON_DEBUG           TRACE_IRQS_ON
+# define TRACE_IRQS_IRETQ_DEBUG                TRACE_IRQS_IRETQ
+#endif
+
 /*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
@@ -1098,7 +1136,7 @@ ENTRY(\sym)
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
-       TRACE_IRQS_OFF
+       TRACE_IRQS_OFF_DEBUG
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
@@ -1393,7 +1431,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip)
 ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
+       TRACE_IRQS_OFF_DEBUG
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
@@ -1404,7 +1442,7 @@ paranoid_swapgs:
        RESTORE_ALL 8
        jmp irq_return
 paranoid_restore:
-       TRACE_IRQS_IRETQ 0
+       TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
        jmp irq_return
 paranoid_userspace:
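
The TRACE_IRQS_*_DEBUG macros added in this hunk bracket the lockdep hooks with debug_stack_set_zero()/debug_stack_reset() so a nested debug exception cannot reset the IST stack in the middle of the update, and the IRETQ variant only fires when the saved EFLAGS show interrupts were on. A minimal user-space C sketch of that shape, with hypothetical stand-in functions (debug_stack_disarm(), trace_irqs_on()) rather than the real kernel hooks:

#include <stdio.h>

#define X86_EFLAGS_IF (1UL << 9)   /* interrupt-enable flag, bit 9 */

/* Hypothetical stand-ins for debug_stack_set_zero()/debug_stack_reset(). */
static void debug_stack_disarm(void) { printf("  debug stack disarmed\n"); }
static void debug_stack_rearm(void)  { printf("  debug stack re-armed\n"); }

/* Stand-in for the lockdep TRACE_IRQS_ON hook. */
static void trace_irqs_on(void) { printf("  lockdep: irqs-on\n"); }

/* Mirrors TRACE_IRQS_IRETQ_DEBUG: only call the hook if the saved
 * EFLAGS say interrupts were enabled, and bracket the call so a
 * nested debug exception cannot reset the IST stack mid-update. */
static void trace_irqs_iretq_debug(unsigned long saved_eflags)
{
        if (!(saved_eflags & X86_EFLAGS_IF))
                return;
        debug_stack_disarm();
        trace_irqs_on();
        debug_stack_rearm();
}

int main(void)
{
        printf("returning with IF set:\n");
        trace_irqs_iretq_debug(X86_EFLAGS_IF);
        printf("returning with IF clear:\n");
        trace_irqs_iretq_debug(0);
        return 0;
}
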
index 32ff36596ab10d65d8d5050402eb8d24a5f3fb6f..c3a7cb4bf6e6f0f429495d8f4a9478ac9161c0d6 100644 (file)
@@ -100,7 +100,7 @@ static const unsigned char *ftrace_nop_replace(void)
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
 {
        unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -141,7 +141,20 @@ int ftrace_make_nop(struct module *mod,
        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();
 
-       return ftrace_modify_code(rec->ip, old, new);
+       /*
+        * On boot up, and when modules are loaded, the MCOUNT_ADDR
+        * is converted to a nop, and will never become MCOUNT_ADDR
+        * again. This code is either running before SMP (on boot up)
+        * or before the code will ever be executed (module load).
+        * We do not want to use the breakpoint version in this case,
+        * just modify the code directly.
+        */
+       if (addr == MCOUNT_ADDR)
+               return ftrace_modify_code_direct(rec->ip, old, new);
+
+       /* Normal cases use add_brk_on_nop */
+       WARN_ONCE(1, "invalid use of ftrace_make_nop");
+       return -EINVAL;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -152,9 +165,47 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);
 
-       return ftrace_modify_code(rec->ip, old, new);
+       /* Should only be called when module is loaded */
+       return ftrace_modify_code_direct(rec->ip, old, new);
 }
 
+/*
+ * The modifying_ftrace_code is used to tell the breakpoint
+ * handler to call ftrace_int3_handler(). If it fails to
+ * call this handler for a breakpoint added by ftrace, then
+ * the kernel may crash.
+ *
+ * As atomic_writes on x86 do not need a barrier, we do not
+ * need to add smp_mb()s for this to work. It is also assumed that a
+ * CPU cannot read modifying_ftrace_code before it executes the
+ * breakpoint; it would be quite remarkable if it could. Here's the
+ * flow that is required:
+ *
+ *   CPU-0                          CPU-1
+ *
+ * atomic_inc(mfc);
+ * write int3s
+ *                             <trap-int3> // implicit (r)mb
+ *                             if (atomic_read(mfc))
+ *                                     call ftrace_int3_handler()
+ *
+ * Then when we are finished:
+ *
+ * atomic_dec(mfc);
+ *
+ * If we hit a breakpoint that was not set by ftrace, it does not
+ * matter if ftrace_int3_handler() is called or not. It will
+ * simply be ignored. But it is crucial that an ftrace nop/caller
+ * breakpoint is handled. No other user should ever place a
+ * breakpoint on an ftrace nop/caller location. It must only
+ * be done by this code.
+ */
+atomic_t modifying_ftrace_code __read_mostly;
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+                  unsigned const char *new_code);
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
        unsigned long ip = (unsigned long)(&ftrace_call);
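
The modifying_ftrace_code comment above relies on one ordering fact: the counter is incremented before any int3 is written, so a CPU that traps on an ftrace breakpoint must observe a non-zero counter. A self-contained sketch of that publish/observe pattern using C11 atomics and two pthreads (compile with -pthread; the patcher/other_cpu threads and the barrier standing in for run_sync() are illustrative, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int modifying;        /* plays the role of modifying_ftrace_code */
static atomic_int breakpoint_set;   /* plays the role of the int3 byte */
static pthread_barrier_t sync_cpus; /* stands in for run_sync()/IPI sync */

static void *patcher(void *arg)
{
        (void)arg;
        atomic_fetch_add(&modifying, 1);   /* announce patching before any int3 */
        atomic_store(&breakpoint_set, 1);  /* "write int3" after the announce */

        pthread_barrier_wait(&sync_cpus);  /* wait until no CPU can still trap */

        atomic_store(&breakpoint_set, 0);  /* remove breakpoints ... */
        atomic_fetch_sub(&modifying, 1);   /* ... then withdraw the announcement */
        return NULL;
}

static void *other_cpu(void *arg)
{
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
                if (atomic_load(&breakpoint_set)) {
                        /* "trap-int3": the announcement must be visible */
                        if (atomic_load(&modifying) == 0)
                                puts("ordering violated (never expected)");
                }
        }
        pthread_barrier_wait(&sync_cpus);  /* this CPU is past the patch window */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_barrier_init(&sync_cpus, NULL, 2);
        pthread_create(&a, NULL, patcher, NULL);
        pthread_create(&b, NULL, other_cpu, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        pthread_barrier_destroy(&sync_cpus);
        puts("done");
        return 0;
}
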
@@ -163,13 +214,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);
+
+       /* See comment above by declaration of modifying_ftrace_code */
+       atomic_inc(&modifying_ftrace_code);
+
        ret = ftrace_modify_code(ip, old, new);
 
+       atomic_dec(&modifying_ftrace_code);
+
        return ret;
 }
 
-int modifying_ftrace_code __read_mostly;
-
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
@@ -489,13 +544,46 @@ void ftrace_replace_code(int enable)
        }
 }
 
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+                  unsigned const char *new_code)
+{
+       int ret;
+
+       ret = add_break(ip, old_code);
+       if (ret)
+               goto out;
+
+       run_sync();
+
+       ret = add_update_code(ip, new_code);
+       if (ret)
+               goto fail_update;
+
+       run_sync();
+
+       ret = ftrace_write(ip, new_code, 1);
+       if (ret) {
+               ret = -EPERM;
+               goto out;
+       }
+       run_sync();
+ out:
+       return ret;
+
+ fail_update:
+       probe_kernel_write((void *)ip, &old_code[0], 1);
+       goto out;
+}
+
 void arch_ftrace_update_code(int command)
 {
-       modifying_ftrace_code++;
+       /* See comment above by declaration of modifying_ftrace_code */
+       atomic_inc(&modifying_ftrace_code);
 
        ftrace_modify_all_code(command);
 
-       modifying_ftrace_code--;
+       atomic_dec(&modifying_ftrace_code);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
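
The new ftrace_modify_code() stages a live instruction rewrite in three writes separated by CPU syncs: breakpoint the first byte, rewrite the tail, then replace the breakpoint with the new first byte, so no CPU can execute a half-updated instruction. A self-contained sketch of that staging on an in-memory byte buffer (run_sync() is a stub here, and the 5-byte nop/call patterns are only illustrative):

#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5
#define INT3      0xCC

/* The "live" text being patched: a 5-byte nop standing in for the call site. */
static unsigned char text[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static void run_sync(void) { /* kernel: IPI every CPU so it serializes */ }

static void dump(const char *when)
{
        printf("%-12s:", when);
        for (int i = 0; i < INSN_SIZE; i++)
                printf(" %02x", text[i]);
        printf("\n");
}

/* Mirrors the staged ftrace_modify_code() above: breakpoint the first byte,
 * update the tail, then replace the breakpoint with the new first byte. */
static int modify_code(const unsigned char *old_code, const unsigned char *new_code)
{
        if (memcmp(text, old_code, INSN_SIZE) != 0)
                return -1;                             /* unexpected live contents */

        text[0] = INT3;                                /* 1) add_break()           */
        run_sync();
        dump("breakpoint");

        memcpy(text + 1, new_code + 1, INSN_SIZE - 1); /* 2) add_update_code()     */
        run_sync();
        dump("tail updated");

        text[0] = new_code[0];                         /* 3) write the final byte  */
        run_sync();
        dump("finished");
        return 0;
}

int main(void)
{
        static const unsigned char old_code[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
        static const unsigned char new_code[INSN_SIZE] = { 0xe8, 0x12, 0x34, 0x56, 0x78 };

        dump("original");
        return modify_code(old_code, new_code) ? 1 : 0;
}
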
index 9cc7b4392f7c8b0462ad4d031667e5197eff9373..1460a5df92f7a7f314ed0be95a81765cf2df676e 100644 (file)
@@ -870,7 +870,7 @@ int __init hpet_enable(void)
        else
                pr_warn("HPET initial state will not be saved\n");
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
-       hpet_writel(cfg, HPET_Tn_CFG(i));
+       hpet_writel(cfg, HPET_CFG);
        if (cfg)
                pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
                        cfg);
index 8bfb6146f7530634d30fa9f25d1718111a214649..3f61904365cff214d26efa4a378830559e39a7f9 100644 (file)
@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
 
 /**
  *     kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- *     @vector: The error vector of the exception that happened.
+ *     @e_vector: The error vector of the exception that happened.
  *     @signo: The signal number of the exception that happened.
  *     @err_code: The error code of the exception that happened.
- *     @remcom_in_buffer: The buffer of the packet we have read.
- *     @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- *     @regs: The &struct pt_regs of the current process.
+ *     @remcomInBuffer: The buffer of the packet we have read.
+ *     @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ *     @linux_regs: The &struct pt_regs of the current process.
  *
  *     This function MUST handle the 'c' and 's' command packets,
 *     as well as packets to set / remove a hardware breakpoint, if used.
index 086eb58c6e801134296372acd7d9efb36de6d12b..f1b42b3a186c7ec7594203b0e58c31e424181908 100644 (file)
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
        bool ret = false;
        struct pvclock_vcpu_time_info *src;
 
-       /*
-        * per_cpu() is safe here because this function is only called from
-        * timer functions where preemption is already disabled.
-        */
-       WARN_ON(!in_atomic());
        src = &__get_cpu_var(hv_clock);
        if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
                __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
index 90875279ef3d56bb04fad7262fb9ea98e2c61dcf..a0b2f84457bebfb88de00f547c9b8db930abca5a 100644 (file)
@@ -444,14 +444,16 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
         */
        if (unlikely(is_debug_stack(regs->sp))) {
                debug_stack_set_zero();
-               __get_cpu_var(update_debug_stack) = 1;
+               this_cpu_write(update_debug_stack, 1);
        }
 }
 
 static inline void nmi_nesting_postprocess(void)
 {
-       if (unlikely(__get_cpu_var(update_debug_stack)))
+       if (unlikely(this_cpu_read(update_debug_stack))) {
                debug_stack_reset();
+               this_cpu_write(update_debug_stack, 0);
+       }
 }
 #endif
 
index e31bf8d5c4d2e410126f397cdb764c281b1693fc..149b8d9c6ad45a199cfa1f55b17df3562afe4127 100644 (file)
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
 static void __init init_nmi_testsuite(void)
 {
        /* trap all the unknown NMIs we may generate */
-       register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+       register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 {
        unsigned long timeout;
 
-       if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+       if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
                                 NMI_FLAG_FIRST, "nmi_selftest")) {
                nmi_fail = FAILURE;
                return;
index 62c9457ccd2f1c4ccad62a757aebdcd520498cc3..c0f420f76cd3b7a65d65536735a1bee1820f89b3 100644 (file)
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 struct dma_attrs *attrs)
 {
        unsigned long dma_mask;
-       struct page *page = NULL;
+       struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
        flag |= __GFP_ZERO;
 again:
+       page = NULL;
        if (!(flag & GFP_ATOMIC))
                page = dma_alloc_from_contiguous(dev, count, get_order(size));
        if (!page)
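
The hunk above moves the page = NULL initialization under the again: label so that a pointer left over from a failed first attempt cannot be reused when the allocation is retried with different constraints. A generic sketch of that retry pattern (the allocator and constraint-check names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Toy fast-path allocator standing in for dma_alloc_from_contiguous(). */
static void *alloc_fast(void)
{
        return malloc(64);
}

/* Pretend the first allocation violates the device's address constraints. */
static int fits_constraints(int attempt)
{
        return attempt > 0;
}

static void *alloc_with_retry(void)
{
        void *buf;
        int attempt = 0;

again:
        buf = NULL;                       /* reset on every pass (the fix above) */
        if (attempt == 0)
                buf = alloc_fast();       /* fast path only tried on the first go */
        if (!buf)
                buf = malloc(64);         /* generic fallback */
        if (!buf)
                return NULL;

        if (!fits_constraints(attempt)) {
                free(buf);                /* without the reset above, this now-   */
                attempt++;                /* dangling pointer would be reused     */
                goto again;
        }
        return buf;
}

int main(void)
{
        void *p = alloc_with_retry();

        printf("allocation %s\n", p ? "succeeded safely" : "failed");
        free(p);
        return 0;
}
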
index 13b1990c7c5839e96d5f5fac0951c48eb3430c9a..c4c6a5c2bf0f393ffa8588a1fa7376bcaa9513bb 100644 (file)
@@ -1211,12 +1211,6 @@ static long x32_arch_ptrace(struct task_struct *child,
                                             0, sizeof(struct user_i387_struct),
                                             datap);
 
-               /* normal 64bit interface to access TLS data.
-                  Works just like arch_prctl, except that the arguments
-                  are reversed. */
-       case PTRACE_ARCH_PRCTL:
-               return do_arch_prctl(child, data, addr);
-
        default:
                return compat_ptrace_request(child, request, addr, data);
        }
index 79c45af81604c7e191116c23b3625c80bb81476d..5de92f1abd76fc4d055a42af0c690284c65b7f2d 100644 (file)
@@ -451,6 +451,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
                },
        },
+       {       /* Handle problems with rebooting on the Precision M6600. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+               },
+       },
        { }
 };
 
@@ -639,9 +647,11 @@ void native_machine_shutdown(void)
        set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
        /*
-        * O.K Now that I'm on the appropriate processor,
-        * stop all of the others.
+        * O.K. Now that I'm on the appropriate processor, stop all of the
+        * others. Also disable the local irq so we do not receive the
+        * per-cpu timer interrupt, which may trigger the scheduler's load
+        * balancing.
         */
+       local_irq_disable();
        stop_other_cpus();
 #endif
 
index 965dfda0fd5e442fa88a13e5aeff0a9b98e73029..21af737053aad05fb726a0e1023dfad428064518 100644 (file)
@@ -555,7 +555,6 @@ unsigned long sys_sigreturn(struct pt_regs *regs)
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &ax))
@@ -581,7 +580,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
@@ -647,42 +645,28 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                struct pt_regs *regs)
 {
        int usig = signr_convert(sig);
-       sigset_t *set = &current->blocked;
-       int ret;
-
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               set = &current->saved_sigmask;
+       sigset_t *set = sigmask_to_save();
 
        /* Set up the stack frame */
        if (is_ia32) {
                if (ka->sa.sa_flags & SA_SIGINFO)
-                       ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
+                       return ia32_setup_rt_frame(usig, ka, info, set, regs);
                else
-                       ret = ia32_setup_frame(usig, ka, set, regs);
+                       return ia32_setup_frame(usig, ka, set, regs);
 #ifdef CONFIG_X86_X32_ABI
        } else if (is_x32) {
-               ret = x32_setup_rt_frame(usig, ka, info,
+               return x32_setup_rt_frame(usig, ka, info,
                                         (compat_sigset_t *)set, regs);
 #endif
        } else {
-               ret = __setup_rt_frame(sig, ka, info, set, regs);
-       }
-
-       if (ret) {
-               force_sigsegv(sig, current);
-               return -EFAULT;
+               return __setup_rt_frame(sig, ka, info, set, regs);
        }
-
-       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-       return ret;
 }
 
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
                struct pt_regs *regs)
 {
-       int ret;
-
        /* Are we from a system call? */
        if (syscall_get_nr(current, regs) >= 0) {
                /* If so, check system call restarting.. */
@@ -713,10 +697,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
            likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
                regs->flags &= ~X86_EFLAGS_TF;
 
-       ret = setup_rt_frame(sig, ka, info, regs);
-
-       if (ret)
-               return ret;
+       if (setup_rt_frame(sig, ka, info, regs) < 0) {
+               force_sigsegv(sig, current);
+               return;
+       }
 
        /*
         * Clear the direction flag as per the ABI for function entry.
@@ -731,12 +715,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
         */
        regs->flags &= ~X86_EFLAGS_TF;
 
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs,
-                                test_thread_flag(TIF_SINGLESTEP));
-
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                        test_thread_flag(TIF_SINGLESTEP));
 }
 
 #ifdef CONFIG_X86_32
@@ -757,16 +737,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
 
-       /*
-        * We want the common case to go fast, which is why we may in certain
-        * cases get here from kernel mode. Just return without doing anything
-        * if so.
-        * X86_32: vm86 regs switched out by assembly code before reaching
-        * here, so testing against kernel CS suffices.
-        */
-       if (!user_mode(regs))
-               return;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal.  */
@@ -796,10 +766,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -827,8 +794,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
        if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
                fire_user_return_notifiers();
@@ -936,7 +901,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
index f56f96da77f57e011b64e3e69cbabdc76ed3d442..7bd8a0823654115cdb476b1693cb3eaeb6affba3 100644 (file)
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (c->phys_proc_id == o->phys_proc_id)
-               return topology_sane(c, o, "mc");
+       if (c->phys_proc_id == o->phys_proc_id) {
+               if (cpu_has(c, X86_FEATURE_AMD_DCM))
+                       return true;
 
+               return topology_sane(c, o, "mc");
+       }
        return false;
 }
 
@@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                if ((i == cpu) || (has_mc && match_llc(c, o)))
                        link_mask(llc_shared, cpu, i);
 
+       }
+
+       /*
+        * This needs a separate iteration over the cpus because we rely on all
+        * cpu_sibling_mask links to be set-up.
+        */
+       for_each_cpu(i, cpu_sibling_setup_mask) {
+               o = &cpu_data(i);
+
                if ((i == cpu) || (has_mc && match_mc(c, o))) {
                        link_mask(core, cpu, i);
 
@@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-       /*
-        * For perf, we return last level cache shared map.
-        * And for power savings, we return cpu_core_map
-        */
-       if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
-               return cpu_core_mask(cpu);
-       else
-               return cpu_llc_shared_mask(cpu);
+       return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
index ff08457a025da1c6a15a5b18ce639f0eb80476eb..05b31d92f69cdf7b7e9ccc22c82835a5e6286bbc 100644 (file)
@@ -303,8 +303,12 @@ gp_in_kernel:
 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
-       /* ftrace must be first, everything else may cause a recursive crash */
-       if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
+       /*
+        * ftrace must be first; everything else may cause a recursive crash.
+        * See the note by the declaration of modifying_ftrace_code in ftrace.c.
+        */
+       if (unlikely(atomic_read(&modifying_ftrace_code)) &&
+           ftrace_int3_handler(regs))
                return;
 #endif
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
index 7515cf0e1805eae308e1dd0650b1dd33a8d8564d..5db36caf4289cdab3fe04bf899f4826504ced389 100644 (file)
@@ -139,6 +139,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
        return nr;
 }
 
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+       if (!seccomp_mode(&tsk->seccomp))
+               return 0;
+       task_pt_regs(tsk)->orig_ax = syscall_nr;
+       task_pt_regs(tsk)->ax = syscall_nr;
+       return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
@@ -174,6 +187,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        int vsyscall_nr;
        int prev_sig_on_uaccess_error;
        long ret;
+       int skip;
 
        /*
         * No point in checking CS -- the only way to get here is a user mode
@@ -205,9 +219,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        }
 
        tsk = current;
-       if (seccomp_mode(&tsk->seccomp))
-               do_exit(SIGKILL);
-
        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
@@ -222,8 +233,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
         * address 0".
         */
        ret = -EFAULT;
+       skip = 0;
        switch (vsyscall_nr) {
        case 0:
+               skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
                        break;
@@ -234,6 +250,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 1:
+               skip = vsyscall_seccomp(tsk, __NR_time);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(time_t)))
                        break;
 
@@ -241,6 +261,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 2:
+               skip = vsyscall_seccomp(tsk, __NR_getcpu);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned)))
                        break;
@@ -253,6 +277,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
+       if (skip) {
+               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+                       goto do_ret;
+               goto done; /* seccomp trace/trap */
+       }
+
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
@@ -271,10 +301,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        regs->ax = ret;
 
+do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
-
+done:
        return true;
 
 sigsegv:
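
The vsyscall emulation now maps each emulated call to its real syscall number and asks the seccomp filter whether to skip it before doing any work; a skipped call either returns the filter-provided value or is left to the tracer. A rough user-space sketch of just the dispatch-and-skip shape (the filter is a stub, not the seccomp API, the trace/trap branch is omitted, and the x86-64 syscall numbers are shown only for orientation):

#include <stdio.h>

enum vsys { VSYS_GETTIMEOFDAY, VSYS_TIME, VSYS_GETCPU };

/* Stub standing in for __secure_computing(); non-zero means skip the call. */
static int filter_says_skip(int syscall_nr)
{
        return syscall_nr == 201;   /* pretend __NR_time is filtered */
}

/* Returns 0 if the call was emulated, non-zero if the filter skipped it. */
static int emulate_vsyscall(enum vsys nr, long *ax)
{
        int syscall_nr;

        switch (nr) {
        case VSYS_GETTIMEOFDAY: syscall_nr = 96;  break;
        case VSYS_TIME:         syscall_nr = 201; break;
        case VSYS_GETCPU:       syscall_nr = 309; break;
        default:                return -1;
        }

        if (filter_says_skip(syscall_nr)) {
                *ax = -1;     /* e.g. an errno the filter decided to return */
                return 1;     /* skipped: caller only emulates the "ret"    */
        }

        *ax = 0;              /* pretend the real work succeeded */
        return 0;
}

int main(void)
{
        long ax;
        int skipped = emulate_vsyscall(VSYS_TIME, &ax);

        printf("VSYS_TIME: %s, ax=%ld\n",
               skipped ? "skipped by filter" : "emulated", ax);
        return 0;
}
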
index 72102e0ab7cb3a0ae2302aa10eadd6bdb73d939a..57e168e27b5b865e187d5ee3d34413189a1c5905 100644 (file)
@@ -2595,8 +2595,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
                        *gfnp = gfn;
                        kvm_release_pfn_clean(pfn);
                        pfn &= ~mask;
-                       if (!get_page_unless_zero(pfn_to_page(pfn)))
-                               BUG();
+                       kvm_get_pfn(pfn);
                        *pfnp = pfn;
                }
        }
@@ -3935,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
        struct kvm_mmu_page *page;
 
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return;
+
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
index 459b58a8a15cecd12c52ca0b48d19c0b57e4a4f0..25b7ae8d058ad8b756987ed7dc8d22382a956519 100644 (file)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns a 32bit unfolded checksum of the buffer.
  */
index f61ee67ec00f0dc6d61b6165e087ba248f81df2c..4f74d94c8d9727f013b3ebed6fe5da6e3975ac93 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 
 #include <asm/word-at-a-time.h>
+#include <linux/sched.h>
 
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        void *map;
        int ret;
 
+       if (__range_not_ok(from, n, TASK_SIZE))
+               return len;
+
        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
index 819137904428a4f82444cf5a9d5055bb59dc6d80..5d7e51f3fd2812ed4176c696d9b175d4ec52fe46 100644 (file)
@@ -28,7 +28,7 @@
 #  - (66): the last prefix is 0x66
 #  - (F3): the last prefix is 0xF3
 #  - (F2): the last prefix is 0xF2
-#
+#  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
 
 Table: one byte opcode
 Referrer:
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp
 b5: LGS Gv,Mp
 b6: MOVZX Gv,Eb
 b7: MOVZX Gv,Ew
-b8: JMPE | POPCNT Gv,Ev (F3)
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
 b9: Grp10 (1A)
 ba: Grp8 Ev,Ib (1A)
 bb: BTC Ev,Gv
-bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
 be: MOVSX Gv,Eb
 bf: MOVSX Gv,Ew
 # 0x0f 0xc0-0xcf
index 97141c26a13ac8400cfce07c36d55cca50962b26..bc4e9d84157fc0c9aa85e71837998da166c1fa57 100644 (file)
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
                extra += PMD_SIZE;
 #endif
                /* The first 2/4M doesn't use large pages. */
-               extra += mr->end - mr->start;
+               if (mr->start < PMD_SIZE)
+                       extra += mr->end - mr->start;
 
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
index be1ef574ce9a7a933c22133be78f6a24bac6a80c..78fe3f1ac49f6d278687337f1887e360787ac3a2 100644 (file)
@@ -180,7 +180,7 @@ err_free_memtype:
 
 /**
  * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
+ * @phys_addr:    bus address of the memory
  * @size:      size of the resource to map
  *
  * ioremap_nocache performs a platform specific sequence of operations to
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 
 /**
  * ioremap_wc  -       map memory into CPU space write combined
- * @offset:    bus address of the memory
+ * @phys_addr: bus address of the memory
  * @size:      size of the resource to map
  *
  * This version of ioremap ensures that the memory is marked write combining.
index e1ebde3152104840961fe6e953b28d628f4bab82..a718e0d23503fdc4bb3149d4ad5c7046458f2a57 100644 (file)
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 
 /**
  * clflush_cache_range - flush a cache range with clflush
- * @addr:      virtual start address
+ * @vaddr:     virtual start address
  * @size:      number of bytes to flush
  *
  * clflush is an unordered instruction which needs fencing with mfence
index f11729fd019c6899e61e2989ed17a1619a46239d..3d68ef6d2266cb66b3d07c578191b80c5348e0e2 100644 (file)
@@ -158,31 +158,47 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
        return req_type;
 }
 
+struct pagerange_state {
+       unsigned long           cur_pfn;
+       int                     ram;
+       int                     not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+       struct pagerange_state *state = arg;
+
+       state->not_ram  |= initial_pfn > state->cur_pfn;
+       state->ram      |= total_nr_pages > 0;
+       state->cur_pfn   = initial_pfn + total_nr_pages;
+
+       return state->ram && state->not_ram;
+}
+
 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
-       int ram_page = 0, not_rampage = 0;
-       unsigned long page_nr;
+       int ret = 0;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       struct pagerange_state state = {start_pfn, 0, 0};
 
-       for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-            ++page_nr) {
-               /*
-                * For legacy reasons, physical address range in the legacy ISA
-                * region is tracked as non-RAM. This will allow users of
-                * /dev/mem to map portions of legacy ISA region, even when
-                * some of those portions are listed(or not even listed) with
-                * different e820 types(RAM/reserved/..)
-                */
-               if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-                   page_is_ram(page_nr))
-                       ram_page = 1;
-               else
-                       not_rampage = 1;
-
-               if (ram_page == not_rampage)
-                       return -1;
+       /*
+        * For legacy reasons, physical address range in the legacy ISA
+        * region is tracked as non-RAM. This will allow users of
+        * /dev/mem to map portions of the legacy ISA region, even when
+        * some of those portions are listed (or not even listed) with
+        * different e820 types (RAM/reserved/..).
+        */
+       if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+               start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
+
+       if (start_pfn < end_pfn) {
+               ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+                               &state, pagerange_is_ram_callback);
        }
 
-       return ram_page;
+       return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }
 
 /*
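
The rewritten pat_pagerange_is_ram() replaces the page-by-page loop with walk_system_ram_range() plus a small callback that records whether the queried range touched RAM, non-RAM, or both; a mixed range is the error case. A self-contained sketch of the same walker/state pattern over a hard-coded RAM map (walk_range() stands in for walk_system_ram_range()):

#include <stdio.h>

struct range { unsigned long start, end; };   /* [start, end) in pfns */

/* A toy "system RAM" map, analogous to the e820-derived resource tree. */
static const struct range ram_map[] = { { 16, 64 }, { 96, 256 } };

struct state {
        unsigned long cur_pfn;
        int ram;
        int not_ram;
};

/* Mirrors pagerange_is_ram_callback(): a gap before this RAM chunk means the
 * queried range touched non-RAM; any pages reported mean it contains RAM. */
static int ram_callback(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct state *st = arg;

        st->not_ram |= pfn > st->cur_pfn;
        st->ram     |= nr_pages > 0;
        st->cur_pfn  = pfn + nr_pages;
        return st->ram && st->not_ram;            /* non-zero stops the walk */
}

/* Stand-in for walk_system_ram_range(): visit RAM chunks inside [start, end). */
static int walk_range(unsigned long start, unsigned long nr,
                      void *arg, int (*fn)(unsigned long, unsigned long, void *))
{
        unsigned long end = start + nr;

        for (size_t i = 0; i < sizeof(ram_map) / sizeof(ram_map[0]); i++) {
                unsigned long s = ram_map[i].start > start ? ram_map[i].start : start;
                unsigned long e = ram_map[i].end   < end   ? ram_map[i].end   : end;

                if (s < e) {
                        int ret = fn(s, e - s, arg);
                        if (ret)
                                return ret;
                }
        }
        return 0;
}

/* Returns 1 for all-RAM, 0 for no-RAM, -1 for a mixed range. */
static int range_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
        struct state st = { start_pfn, 0, 0 };
        int ret = walk_range(start_pfn, end_pfn - start_pfn, &st, ram_callback);

        return (ret > 0) ? -1 : (st.ram ? 1 : 0);
}

int main(void)
{
        printf("[20,40):  %d\n", range_is_ram(20, 40));   /* all RAM  ->  1 */
        printf("[0,8):    %d\n", range_is_ram(0, 8));     /* no RAM   ->  0 */
        printf("[60,100): %d\n", range_is_ram(60, 100));  /* mixed    -> -1 */
        return 0;
}
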
index 732af3a9618375da189f3da4651d75a7fbd4693b..4599c3e8bcb63f39fe618f0075ba9e52e0fbb353 100644 (file)
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                return;
        }
 
+       node_set(node, numa_nodes_parsed);
+
        printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
               node, pxm,
               (unsigned long long) start, (unsigned long long) end - 1);
index 3c6e328483c7dddd2e91011eec806b78b798d542..028454f0c3a51ac86a7105d24d433e197621a95d 100644 (file)
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper;
 static int dumper_registered;
 
 static void dw_kmsg_dump(struct kmsg_dumper *dumper,
-                       enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2)
+                        enum kmsg_dump_reason reason)
 {
-       int i;
+       static char line[1024];
+       size_t len;
 
        /* When we get here, we'd better re-init the HW */
        mrst_early_console_init();
 
-       for (i = 0; i < l1; i++)
-               early_mrst_console.write(&early_mrst_console, s1 + i, 1);
-       for (i = 0; i < l2; i++)
-               early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+       while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+               early_mrst_console.write(&early_mrst_console, line, len);
 }
 
 /* Set the ratio rate to 115200, 8n1, IRQ disabled */
index e31bcd8f2eeef2af6d2db13b824864b74867767d..fd41a9262d657ee672be6700b0dcc1fb8c147127 100644 (file)
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
 EXPORT_SYMBOL_GPL(intel_scu_notifier);
 
 /* Called by IPC driver */
-void intel_scu_devices_create(void)
+void __devinit intel_scu_devices_create(void)
 {
        int i;
 
index 3ae0e61abd23acf7b1de580b8c234873b1870209..59880afa851fc37d6995e3fd665fca1f1d3d91b3 100644 (file)
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void)
                 */
                mmr_image |= (1L << SOFTACK_MSHIFT);
                if (is_uv2_hub()) {
-                       mmr_image &= ~(1L << UV2_LEG_SHFT);
                        mmr_image |= (1L << UV2_EXT_SHFT);
                }
                write_mmr_misc_control(pnode, mmr_image);
index 29f9f0554f7de0244e7120ea69fec26640bf7dce..7a35a6e71d44332d351cdeb9ec28e96c6467c7b6 100644 (file)
 346    i386    setns                   sys_setns
 347    i386    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
 348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
+349    i386    kcmp                    sys_kcmp
index dd29a9ea27c560a9d2fcb6e1c2983f8b8e9be407..51171aeff0dc31483cdc6526e641459b0d64deb3 100644 (file)
 309    common  getcpu                  sys_getcpu
 310    64      process_vm_readv        sys_process_vm_readv
 311    64      process_vm_writev       sys_process_vm_writev
+312    64      kcmp                    sys_kcmp
+
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 # for native 64-bit operation.
index 5f6a5b6c3a159842b4d3efc9aa3f45abb0a605d4..ddcf39b1a18d3f8ad034237abc9adf293d46b24c 100644 (file)
@@ -66,9 +66,10 @@ BEGIN {
        rex_expr = "^REX(\\.[XRWB]+)*"
        fpu_expr = "^ESC" # TODO
 
-       lprefix1_expr = "\\(66\\)"
+       lprefix1_expr = "\\((66|!F3)\\)"
        lprefix2_expr = "\\(F3\\)"
-       lprefix3_expr = "\\(F2\\)"
+       lprefix3_expr = "\\((F2|!F3)\\)"
+       lprefix_expr = "\\((66|F2|F3)\\)"
        max_lprefix = 4
 
        # All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@ function convert_operands(count,opnd,       i,j,imm,mod)
                if (match(ext, lprefix1_expr)) {
                        lptable1[idx] = add_flags(lptable1[idx],flags)
                        variant = "INAT_VARIANT"
-               } else if (match(ext, lprefix2_expr)) {
+               }
+               if (match(ext, lprefix2_expr)) {
                        lptable2[idx] = add_flags(lptable2[idx],flags)
                        variant = "INAT_VARIANT"
-               } else if (match(ext, lprefix3_expr)) {
+               }
+               if (match(ext, lprefix3_expr)) {
                        lptable3[idx] = add_flags(lptable3[idx],flags)
                        variant = "INAT_VARIANT"
-               } else {
+               }
+               if (!match(ext, lprefix_expr)){
                        table[idx] = add_flags(table[idx],flags)
                }
        }
index bb0fb03b9f85f268912e7b6851f74787c8ee8e4a..a508cea135033eba3c1c7facba8aebd04a1a0983 100644 (file)
@@ -486,7 +486,6 @@ long sys_sigreturn(struct pt_regs *regs)
            copy_from_user(&set.sig[1], extramask, sig_size))
                goto segfault;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (copy_sc_from_user(&current->thread.regs, sc))
@@ -600,7 +599,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
        if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto segfault;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
index 416bd40c0eba51f46863888f05637fc8794389ba..68d1dc91b37badeaeff9fe517a06df2be6484f8a 100644 (file)
@@ -39,9 +39,9 @@
 #undef __SYSCALL_I386
 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
 
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage void (*sys_call_ptr_t)(void);
 
-extern void sys_ni_syscall(void);
+extern asmlinkage void sys_ni_syscall(void);
 
 const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
        /*
index 75f33b2a59336a865d7f8abf1dada19763696b70..ff962d4b821e5162415fa06ddf75e8c57d498b51 100644 (file)
@@ -209,6 +209,9 @@ static void __init xen_banner(void)
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
+#define CPUID_THERM_POWER_LEAF 6
+#define APERFMPERF_PRESENT 0
+
 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
 
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                *dx = cpuid_leaf5_edx_val;
                return;
 
+       case CPUID_THERM_POWER_LEAF:
+               /* Disabling APERFMPERF for kernel usage */
+               maskecx = ~(1 << APERFMPERF_PRESENT);
+               break;
+
        case 0xb:
                /* Suppress extended topology stuff */
                maskebx = 0;
@@ -1116,7 +1124,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .wbinvd = native_wbinvd,
 
        .read_msr = native_read_msr_safe,
+       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = xen_write_msr_safe,
+       .wrmsr_regs = native_wrmsr_safe_regs,
+
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
 
index ffd08c414e91a7cc9f4a507b4ca3bbad9b823523..64effdc6da9400c09515af384d3d4b94c8efcb50 100644 (file)
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
 
+       /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+        * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+        * pfn so that the following mfn_to_pfn(mfn) calls will return the
+        * pfn from the m2p_override (the backend pfn) instead.
+        * We need to do this because the pages shared by the frontend
+        * (xen-blkfront) can already be locked (lock_page, called by
+        * do_read_cache_page); when the userspace backend tries to use them
+        * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+        * do_blockdev_direct_IO is going to try to lock the same pages
+        * again resulting in a deadlock.
+        * As a side effect get_user_pages_fast might not be safe on the
+        * frontend pages while they are being shared with the backend,
+        * because mfn_to_pfn (that ends up being called by GUPF) will
+        * return the backend pfn rather than the frontend pfn. */
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+               set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        } else
                set_phys_to_machine(pfn, page->index);
 
+       /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+        * somewhere in this domain, even before being added to the
+        * m2p_override (see comment above in m2p_add_override).
+        * If there are no other entries in the m2p_override corresponding
+        * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+        * the original pfn (the one shared by the frontend): the backend
+        * cannot do any IO on this page anymore because it has been
+        * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+        * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+        * pfn again. */
+       mfn &= ~FOREIGN_FRAME_BIT;
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+                       m2p_find_override(mfn) == NULL)
+               set_phys_to_machine(pfn, mfn);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_remove_override);
index 3ebba0753d3876b887875aaf53fdd461f544441c..a4790bf22c592326b4b53a80f615e2f2ee658b83 100644 (file)
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void)
        populated = xen_populate_chunk(map, memmap.nr_entries,
                        max_pfn, &last_pfn, xen_released_pages);
 
-       extra_pages += (xen_released_pages - populated);
+       xen_released_pages -= populated;
+       extra_pages += xen_released_pages;
 
        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
index 7608559de93aa647acfb6c8c877fb03208512b83..f973754ddf90414873b2c63bfdd08b4202f2dcb7 100644 (file)
@@ -68,8 +68,8 @@ endif
 
 # Only build variant and/or platform if it includes a Makefile
 
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
 
 # Find libgcc.a
 
index 0b9f2e13c78128b8dc1440305d9db04810a66476..c1dacca312f3906374614ae4eb0a486ab8070e8a 100644 (file)
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        struct timespec __user *tsp, const sigset_t __user *sigmask,
        size_t sigsetsize);
-
-
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+               size_t sigsetsize);
index 9b306e550e3f06ddfa342c041c7793543702c4b3..2c8d6a3d250afd2d18fa8b41b03dc26b0ad97ca2 100644 (file)
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 
        /* Don't leak any random bits. */
 
-       memset(elfregs, 0, sizeof (elfregs));
+       memset(elfregs, 0, sizeof(*elfregs));
 
        /* Note:  PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
index c5e4ec0598d24b8ce1406eb73ce83cb580da014b..efe4e854b3cdd06808eec1bcd3fe8fa70b07ed96 100644 (file)
@@ -30,8 +30,6 @@
 
 #define DEBUG_SIG  0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern struct task_struct *coproc_owners[];
 
 struct rt_sigframe
@@ -261,7 +259,6 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, frame))
@@ -452,15 +449,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t oldset;
-
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
 
        task_pt_regs(current)->icountlevel = 0;
 
@@ -501,19 +489,17 @@ static void do_signal(struct pt_regs *regs)
 
                /* Whee!  Actually deliver the signal.  */
                /* Set up the stack frame */
-               ret = setup_frame(signr, &ka, &info, oldset, regs);
+               ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
                if (ret)
                        return;
 
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               block_sigmask(&ka, signr);
+               signal_delivered(signr, &info, &ka, regs, 0);
                if (current->ptrace & PT_SINGLESTEP)
                        task_pt_regs(current)->icountlevel = 1;
 
                return;
        }
 
-no_signal:
        /* Did we come from a system call? */
        if ((signed) regs->syscall >= 0) {
                /* Restart the system call - no handlers present */
@@ -532,8 +518,7 @@ no_signal:
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 
        if (current->ptrace & PT_SINGLESTEP)
                task_pt_regs(current)->icountlevel = 1;
@@ -548,9 +533,6 @@ void do_notify_resume(struct pt_regs *regs)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
index 88ecea3facb4e8c81f507a667ed55feda6cfcb1a..ee2e2089483d08a532d795743b9a532cf2311f93 100644 (file)
@@ -83,7 +83,6 @@ SECTIONS
 
   _text = .;
   _stext = .;
-  _ftext = .;
 
   .text :
   {
@@ -112,7 +111,7 @@ SECTIONS
   EXCEPTION_TABLE(16)
   /* Data section */
 
-  _fdata = .;
+  _sdata = .;
   RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
   _edata = .;
 
index ba150e5de2ebe6583c2b708525a26e33b8acbfb2..db955179da2d5de8adac6a4ab66d4be17ec943fc 100644 (file)
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
-
-/* References to section boundaries */
-
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
 
 /*
  * mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@ void __init mem_init(void)
                        reservedpages++;
        }
 
-       codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
-       datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
-       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+       codesize =  (unsigned long) _etext - (unsigned long) _stext;
+       datasize =  (unsigned long) _edata - (unsigned long) _sdata;
+       initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
 
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
               "%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void free_initmem(void)
 {
-       free_reserved_mem(&__init_begin, &__init_end);
-       printk("Freeing unused kernel memory: %dk freed\n",
-              (&__init_end - &__init_begin) >> 10);
+       free_reserved_mem(__init_begin, __init_end);
+       printk("Freeing unused kernel memory: %zuk freed\n",
+              (__init_end - __init_begin) >> 10);
 }
index 02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df..f3b44a65fc7ad5f127bee8bcbadf5b486a7e5c71 100644 (file)
@@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root);
 
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
-struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
-{
-       return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-                           struct blkcg, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-
-static struct blkcg *task_blkcg(struct task_struct *tsk)
-{
-       return container_of(task_subsys_state(tsk, blkio_subsys_id),
-                           struct blkcg, css);
-}
-
-struct blkcg *bio_blkcg(struct bio *bio)
-{
-       if (bio && bio->bi_css)
-               return container_of(bio->bi_css, struct blkcg, css);
-       return task_blkcg(current);
-}
-EXPORT_SYMBOL_GPL(bio_blkcg);
-
 static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
 {
@@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg)
                kfree(pd);
        }
 
+       blk_exit_rl(&blkg->rl);
        kfree(blkg);
 }
 
@@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg)
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
  * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+                                  gfp_t gfp_mask)
 {
        struct blkcg_gq *blkg;
        int i;
 
        /* alloc and init base part */
-       blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+       blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;
 
@@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;
 
+       /* root blkg uses @q->root_rl, init rl only for !root blkgs */
+       if (blkcg != &blkcg_root) {
+               if (blk_init_rl(&blkg->rl, q, gfp_mask))
+                       goto err_free;
+               blkg->rl.blkg = blkg;
+       }
+
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;
@@ -117,25 +106,23 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
                        continue;
 
                /* alloc per-policy data and attach it to blkg */
-               pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
-               if (!pd) {
-                       blkg_free(blkg);
-                       return NULL;
-               }
+               pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+               if (!pd)
+                       goto err_free;
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
 
        return blkg;
+
+err_free:
+       blkg_free(blkg);
+       return NULL;
 }
 
 static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
@@ -179,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
+ */
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-                                            struct request_queue *q)
-       __releases(q->queue_lock) __acquires(q->queue_lock)
+                                            struct request_queue *q,
+                                            struct blkcg_gq *new_blkg)
 {
        struct blkcg_gq *blkg;
        int ret;
@@ -193,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
        blkg = __blkg_lookup(blkcg, q);
        if (blkg) {
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
-               return blkg;
+               goto out_free;
        }
 
        /* blkg holds a reference to blkcg */
-       if (!css_tryget(&blkcg->css))
-               return ERR_PTR(-EINVAL);
+       if (!css_tryget(&blkcg->css)) {
+               blkg = ERR_PTR(-EINVAL);
+               goto out_free;
+       }
 
        /* allocate */
-       ret = -ENOMEM;
-       blkg = blkg_alloc(blkcg, q);
-       if (unlikely(!blkg))
-               goto err_put;
+       if (!new_blkg) {
+               new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+               if (unlikely(!new_blkg)) {
+                       blkg = ERR_PTR(-ENOMEM);
+                       goto out_put;
+               }
+       }
+       blkg = new_blkg;
 
        /* insert */
-       ret = radix_tree_preload(GFP_ATOMIC);
-       if (ret)
-               goto err_free;
-
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
@@ -219,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
        }
        spin_unlock(&blkcg->lock);
 
-       radix_tree_preload_end();
-
        if (!ret)
                return blkg;
-err_free:
-       blkg_free(blkg);
-err_put:
+
+       blkg = ERR_PTR(ret);
+out_put:
        css_put(&blkcg->css);
-       return ERR_PTR(ret);
+out_free:
+       blkg_free(new_blkg);
+       return blkg;
 }
 
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
@@ -239,16 +232,15 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-       return __blkg_lookup_create(blkcg, q);
+       return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
@@ -318,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
+/*
+ * The "next" iteration function used by blk_queue_for_each_rl().  It's a
+ * bit tricky because the root blkg uses @q->root_rl instead of its own rl.
+ */
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+                                        struct request_queue *q)
+{
+       struct list_head *ent;
+       struct blkcg_gq *blkg;
+
+       /*
+        * Determine the current blkg list_head.  The first entry is
+        * root_rl which is off @q->blkg_list and mapped to the head.
+        */
+       if (rl == &q->root_rl) {
+               ent = &q->blkg_list;
+       } else {
+               blkg = container_of(rl, struct blkcg_gq, rl);
+               ent = &blkg->q_node;
+       }
+
+       /* walk to the next list_head, skip root blkcg */
+       ent = ent->next;
+       if (ent == &q->root_blkg->q_node)
+               ent = ent->next;
+       if (ent == &q->blkg_list)
+               return NULL;
+
+       blkg = container_of(ent, struct blkcg_gq, q_node);
+       return &blkg->rl;
+}
+
 static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
 {
@@ -739,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q,
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
+       bool preloaded;
 
        if (blkcg_policy_enabled(q, pol))
                return 0;
 
+       /* preallocations for root blkg */
+       blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+       if (!blkg)
+               return -ENOMEM;
+
+       preloaded = !radix_tree_preload(GFP_KERNEL);
+
        blk_queue_bypass_start(q);
 
        /* make sure the root blkg exists and count the existing blkgs */
        spin_lock_irq(q->queue_lock);
 
        rcu_read_lock();
-       blkg = __blkg_lookup_create(&blkcg_root, q);
+       blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
        rcu_read_unlock();
 
+       if (preloaded)
+               radix_tree_preload_end();
+
        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
+       q->root_rl.blkg = blkg;
 
        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;
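
The reworked __blkg_lookup_create() accepts an optional preallocated blkg and always consumes it, which is what lets blkcg_activate_policy() allocate with GFP_KERNEL and preload the radix tree before taking the queue lock. A small sketch of that "caller may preallocate, callee always consumes" contract (the names are illustrative, not the blkcg API):

#include <stdio.h>
#include <stdlib.h>

struct group {
        char name[32];
};

/* Looks up an existing group; here, pretend nothing exists yet. */
static struct group *lookup(const char *name)
{
        (void)name;
        return NULL;
}

/*
 * If @prealloc is non-NULL it is either used or freed before returning,
 * so the caller never has to track its ownership after this call.
 */
static struct group *lookup_create(const char *name, struct group *prealloc)
{
        struct group *grp = lookup(name);

        if (grp) {
                free(prealloc);               /* not needed: consume it anyway */
                return grp;
        }

        /* Use the caller's preallocation if there is one, else allocate here. */
        grp = prealloc ? prealloc : malloc(sizeof(*grp));
        if (!grp)
                return NULL;

        snprintf(grp->name, sizeof(grp->name), "%s", name);
        return grp;
}

int main(void)
{
        /* Preallocate in a context where blocking allocation is allowed ... */
        struct group *pre = malloc(sizeof(*pre));

        /* ... then create under whatever lock forbids blocking. */
        struct group *grp = lookup_create("root", pre);

        printf("created group: %s\n", grp ? grp->name : "(failed)");
        free(grp);
        return 0;
}
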
index 8ac457ce7783847522c1340c008d3c7789f3a1b2..24597309e23d38700a6ca2ae80cd9aab71aa3474 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
+#include <linux/blkdev.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX                UINT_MAX
@@ -93,6 +94,8 @@ struct blkcg_gq {
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;
+       /* request allocation list for this blkcg-q pair */
+       struct request_list             rl;
        /* reference count */
        int                             refcnt;
 
@@ -120,8 +123,6 @@ struct blkcg_policy {
 
 extern struct blkcg blkcg_root;
 
-struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
-struct blkcg *bio_blkcg(struct bio *bio);
 struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
@@ -160,6 +161,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
 
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+{
+       return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+                           struct blkcg, css);
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+       return container_of(task_subsys_state(tsk, blkio_subsys_id),
+                           struct blkcg, css);
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+       if (bio && bio->bi_css)
+               return container_of(bio->bi_css, struct blkcg, css);
+       return task_blkcg(current);
+}
+
 /**
  * blkg_to_pdata - get policy private data
  * @blkg: blkg of interest
@@ -233,6 +253,95 @@ static inline void blkg_put(struct blkcg_gq *blkg)
                __blkg_release(blkg);
 }
 
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio.  Find
+ * the request_list to use and obtain a reference on it.  Should be called
+ * under queue_lock.  This function is guaranteed to return non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+                                             struct bio *bio)
+{
+       struct blkcg *blkcg;
+       struct blkcg_gq *blkg;
+
+       rcu_read_lock();
+
+       blkcg = bio_blkcg(bio);
+
+       /* bypass blkg lookup and use @q->root_rl directly for root */
+       if (blkcg == &blkcg_root)
+               goto root_rl;
+
+       /*
+        * Try to use blkg->rl.  blkg lookup may fail under memory pressure
+        * or if either the blkcg or queue is going away.  Fall back to
+        * root_rl in such cases.
+        */
+       blkg = blkg_lookup_create(blkcg, q);
+       if (unlikely(IS_ERR(blkg)))
+               goto root_rl;
+
+       blkg_get(blkg);
+       rcu_read_unlock();
+       return &blkg->rl;
+root_rl:
+       rcu_read_unlock();
+       return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl().  Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+       /* root_rl may not have blkg set */
+       if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+               blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+       rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+       return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+                                        struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q)   \
+       for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
 /**
  * blkg_stat_add - add a value to a blkg_stat
  * @stat: target blkg_stat
@@ -351,6 +460,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
 #else  /* CONFIG_BLK_CGROUP */
 
 struct cgroup;
+struct blkcg;
 
 struct blkg_policy_data {
 };
@@ -361,8 +471,6 @@ struct blkcg_gq {
 struct blkcg_policy {
 };
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
@@ -374,6 +482,9 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }
 
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
@@ -381,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
 static inline void blkg_get(struct blkcg_gq *blkg) { }
 static inline void blkg_put(struct blkcg_gq *blkg) { }
 
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+                                             struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q)   \
+       for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
 #endif /* CONFIG_BLK_CGROUP */
 #endif /* _BLK_CGROUP_H */
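
A hedged usage sketch of the request_list helpers declared above, mirroring how __get_request() and __blk_put_request() in blk-core.c (later in this diff) pair them up; the demo_* names are invented and error handling is trimmed:

/* Sketch only -- assumes <linux/blkdev.h> and "blk-cgroup.h", and that the
 * caller holds q->queue_lock as the blk_get_rl() comment requires. */
static struct request *demo_alloc_rq(struct request_queue *q,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_list *rl = blk_get_rl(q, bio);	/* never NULL */
	struct request *rq = mempool_alloc(rl->rq_pool, gfp_mask);

	if (!rq) {
		blk_put_rl(rl);			/* drop the blkg reference */
		return NULL;
	}
	blk_rq_set_rl(rq, rl);			/* remembered until free time */
	return rq;
}

static void demo_free_rq(struct request *rq)
{
	struct request_list *rl = blk_rq_rl(rq);

	mempool_free(rq, rl->rq_pool);
	blk_put_rl(rl);
}
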
index 3c923a7aeb56f1658142b091868c3c29ebffd3c5..4b4dbdfbca89fe5769fd4b2f6826f305fca18e26 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -386,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);
 
-               drain |= q->rq.elvpriv;
+               drain |= q->nr_rqs_elvpriv;
 
                /*
                 * Unfortunately, requests are queued at and tracked from
@@ -396,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all) {
                        drain |= !list_empty(&q->queue_head);
                        for (i = 0; i < 2; i++) {
-                               drain |= q->rq.count[i];
+                               drain |= q->nr_rqs[i];
                                drain |= q->in_flight[i];
                                drain |= !list_empty(&q->flush_queue[i]);
                        }
@@ -408,6 +409,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With the queue marked dead, any woken-up waiter will fail the
+        * allocation path, so the wakeup chaining is lost and we're
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               struct request_list *rl;
+
+               spin_lock_irq(q->queue_lock);
+
+               blk_queue_for_each_rl(rl, q)
+                       for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
+                               wake_up_all(&rl->wait[i]);
+
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
@@ -467,7 +485,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +502,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,33 +512,43 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static int blk_init_free_list(struct request_queue *q)
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+               gfp_t gfp_mask)
 {
-       struct request_list *rl = &q->rq;
-
        if (unlikely(rl->rq_pool))
                return 0;
 
+       rl->q = q;
        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
-       rl->elvpriv = 0;
        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                               mempool_free_slab, request_cachep, q->node);
-
+                                         mempool_free_slab, request_cachep,
+                                         gfp_mask, q->node);
        if (!rl->rq_pool)
                return -ENOMEM;
 
        return 0;
 }
 
+void blk_exit_rl(struct request_list *rl)
+{
+       if (rl->rq_pool)
+               mempool_destroy(rl->rq_pool);
+}
+
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
        return blk_alloc_queue_node(gfp_mask, -1);
@@ -667,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        if (!q)
                return NULL;
 
-       if (blk_init_free_list(q))
+       if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
                return NULL;
 
        q->request_fn           = rfn;
@@ -709,15 +732,15 @@ bool blk_get_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_get_queue);
 
-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
 {
        if (rq->cmd_flags & REQ_ELVPRIV) {
-               elv_put_request(q, rq);
+               elv_put_request(rl->q, rq);
                if (rq->elv.icq)
                        put_io_context(rq->elv.icq->ioc);
        }
 
-       mempool_free(rq, q->rq.rq_pool);
+       mempool_free(rq, rl->rq_pool);
 }
 
 /*
@@ -754,18 +777,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
        ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_list *rl, int sync)
 {
-       struct request_list *rl = &q->rq;
+       struct request_queue *q = rl->q;
 
-       if (rl->count[sync] < queue_congestion_off_threshold(q))
+       /*
+        * bdi isn't aware of blkcg yet.  As all async IOs end up in the
+        * root blkcg anyway, just use root blkcg state.
+        */
+       if (rl == &q->root_rl &&
+           rl->count[sync] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, sync);
 
        if (rl->count[sync] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[sync]))
                        wake_up(&rl->wait[sync]);
 
-               blk_clear_queue_full(q, sync);
+               blk_clear_rl_full(rl, sync);
        }
 }
 
@@ -773,19 +801,20 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, unsigned int flags)
+static void freed_request(struct request_list *rl, unsigned int flags)
 {
-       struct request_list *rl = &q->rq;
+       struct request_queue *q = rl->q;
        int sync = rw_is_sync(flags);
 
+       q->nr_rqs[sync]--;
        rl->count[sync]--;
        if (flags & REQ_ELVPRIV)
-               rl->elvpriv--;
+               q->nr_rqs_elvpriv--;
 
-       __freed_request(q, sync);
+       __freed_request(rl, sync);
 
        if (unlikely(rl->starved[sync ^ 1]))
-               __freed_request(q, sync ^ 1);
+               __freed_request(rl, sync ^ 1);
 }
 
 /*
@@ -824,8 +853,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 }
 
 /**
- * get_request - get a free request
- * @q: request_queue to allocate request from
+ * __get_request - get a free request
+ * @rl: request list to allocate from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
@@ -837,20 +866,16 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-                                  struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int rw_flags,
+                                    struct bio *bio, gfp_t gfp_mask)
 {
+       struct request_queue *q = rl->q;
        struct request *rq;
-       struct request_list *rl = &q->rq;
-       struct elevator_type *et;
-       struct io_context *ioc;
+       struct elevator_type *et = q->elevator->type;
+       struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq = NULL;
        const bool is_sync = rw_is_sync(rw_flags) != 0;
-       bool retried = false;
        int may_queue;
-retry:
-       et = q->elevator->type;
-       ioc = rq_ioc(bio);
 
        if (unlikely(blk_queue_dead(q)))
                return NULL;
@@ -861,29 +886,15 @@ retry:
 
        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
                if (rl->count[is_sync]+1 >= q->nr_requests) {
-                       /*
-                        * We want ioc to record batching state.  If it's
-                        * not already there, creating a new one requires
-                        * dropping queue_lock, which in turn requires
-                        * retesting conditions to avoid queue hang.
-                        */
-                       if (!ioc && !retried) {
-                               spin_unlock_irq(q->queue_lock);
-                               create_io_context(gfp_mask, q->node);
-                               spin_lock_irq(q->queue_lock);
-                               retried = true;
-                               goto retry;
-                       }
-
                        /*
                         * The queue will fill after this allocation, so set
                         * it as full, and mark this process as "batching".
                         * This process will be allowed to complete a batch of
                         * requests, others will be blocked.
                         */
-                       if (!blk_queue_full(q, is_sync)) {
+                       if (!blk_rl_full(rl, is_sync)) {
                                ioc_set_batching(q, ioc);
-                               blk_set_queue_full(q, is_sync);
+                               blk_set_rl_full(rl, is_sync);
                        } else {
                                if (may_queue != ELV_MQUEUE_MUST
                                                && !ioc_batching(q, ioc)) {
@@ -896,7 +907,12 @@ retry:
                                }
                        }
                }
-               blk_set_queue_congested(q, is_sync);
+               /*
+                * bdi isn't aware of blkcg yet.  As all async IOs end up
+                * in the root blkcg anyway, just use root blkcg state.
+                */
+               if (rl == &q->root_rl)
+                       blk_set_queue_congested(q, is_sync);
        }
 
        /*
@@ -907,6 +923,7 @@ retry:
        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
                return NULL;
 
+       q->nr_rqs[is_sync]++;
        rl->count[is_sync]++;
        rl->starved[is_sync] = 0;
 
@@ -922,7 +939,7 @@ retry:
         */
        if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
                rw_flags |= REQ_ELVPRIV;
-               rl->elvpriv++;
+               q->nr_rqs_elvpriv++;
                if (et->icq_cache && ioc)
                        icq = ioc_lookup_icq(ioc, q);
        }
@@ -932,22 +949,19 @@ retry:
        spin_unlock_irq(q->queue_lock);
 
        /* allocate and init request */
-       rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+       rq = mempool_alloc(rl->rq_pool, gfp_mask);
        if (!rq)
                goto fail_alloc;
 
        blk_rq_init(q, rq);
+       blk_rq_set_rl(rq, rl);
        rq->cmd_flags = rw_flags | REQ_ALLOCED;
 
        /* init elvpriv */
        if (rw_flags & REQ_ELVPRIV) {
                if (unlikely(et->icq_cache && !icq)) {
-                       create_io_context(gfp_mask, q->node);
-                       ioc = rq_ioc(bio);
-                       if (!ioc)
-                               goto fail_elvpriv;
-
-                       icq = ioc_create_icq(ioc, q, gfp_mask);
+                       if (ioc)
+                               icq = ioc_create_icq(ioc, q, gfp_mask);
                        if (!icq)
                                goto fail_elvpriv;
                }
@@ -987,7 +1001,7 @@ fail_elvpriv:
        rq->elv.icq = NULL;
 
        spin_lock_irq(q->queue_lock);
-       rl->elvpriv--;
+       q->nr_rqs_elvpriv--;
        spin_unlock_irq(q->queue_lock);
        goto out;
 
@@ -1000,7 +1014,7 @@ fail_alloc:
         * queue, but this is pretty rare.
         */
        spin_lock_irq(q->queue_lock);
-       freed_request(q, rw_flags);
+       freed_request(rl, rw_flags);
 
        /*
         * in the very unlikely event that allocation failed and no
@@ -1016,56 +1030,58 @@ rq_starved:
 }
 
 /**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
  *
- * Get a free request from @q.  This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
  *
  * Must be called with @q->queue_lock held and,
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
-                                       struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+                                  struct bio *bio, gfp_t gfp_mask)
 {
        const bool is_sync = rw_is_sync(rw_flags) != 0;
+       DEFINE_WAIT(wait);
+       struct request_list *rl;
        struct request *rq;
 
-       rq = get_request(q, rw_flags, bio, GFP_NOIO);
-       while (!rq) {
-               DEFINE_WAIT(wait);
-               struct request_list *rl = &q->rq;
-
-               if (unlikely(blk_queue_dead(q)))
-                       return NULL;
+       rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
+retry:
+       rq = __get_request(rl, rw_flags, bio, gfp_mask);
+       if (rq)
+               return rq;
 
-               prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
-                               TASK_UNINTERRUPTIBLE);
+       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+               blk_put_rl(rl);
+               return NULL;
+       }
 
-               trace_block_sleeprq(q, bio, rw_flags & 1);
+       /* wait on @rl and retry */
+       prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+                                 TASK_UNINTERRUPTIBLE);
 
-               spin_unlock_irq(q->queue_lock);
-               io_schedule();
+       trace_block_sleeprq(q, bio, rw_flags & 1);
 
-               /*
-                * After sleeping, we become a "batching" process and
-                * will be able to allocate at least one request, and
-                * up to a big batch of them for a small period time.
-                * See ioc_batching, ioc_set_batching
-                */
-               create_io_context(GFP_NOIO, q->node);
-               ioc_set_batching(q, current->io_context);
+       spin_unlock_irq(q->queue_lock);
+       io_schedule();
 
-               spin_lock_irq(q->queue_lock);
-               finish_wait(&rl->wait[is_sync], &wait);
+       /*
+        * After sleeping, we become a "batching" process and will be able
+        * to allocate at least one request, and up to a big batch of them
+        * for a small period of time.  See ioc_batching, ioc_set_batching
+        */
+       ioc_set_batching(q, current->io_context);
 
-               rq = get_request(q, rw_flags, bio, GFP_NOIO);
-       };
+       spin_lock_irq(q->queue_lock);
+       finish_wait(&rl->wait[is_sync], &wait);
 
-       return rq;
+       goto retry;
 }
 
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
@@ -1074,11 +1090,11 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 
        BUG_ON(rw != READ && rw != WRITE);
 
+       /* create ioc upfront */
+       create_io_context(gfp_mask, q->node);
+
        spin_lock_irq(q->queue_lock);
-       if (gfp_mask & __GFP_WAIT)
-               rq = get_request_wait(q, rw, NULL);
-       else
-               rq = get_request(q, rw, NULL, gfp_mask);
+       rq = get_request(q, rw, NULL, gfp_mask);
        if (!rq)
                spin_unlock_irq(q->queue_lock);
        /* q->queue_lock is unlocked at this point */
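
With get_request_wait() folded into get_request(), retrying is now keyed off __GFP_WAIT in the caller's gfp_mask, as the reworked kernel-doc above states.  A hedged caller-side sketch with an invented function name:

/* Sketch only -- assumes <linux/blkdev.h>.  A __GFP_WAIT mask such as
 * GFP_NOIO keeps retrying under memory pressure and fails only if @q
 * is dead. */
static int demo_issue_rq(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_NOIO);

	if (!rq)
		return -ENODEV;		/* @q is dead */

	/* ... fill in and dispatch rq here ... */

	blk_put_request(rq);
	return 0;
}
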
@@ -1235,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         */
        if (req->cmd_flags & REQ_ALLOCED) {
                unsigned int flags = req->cmd_flags;
+               struct request_list *rl = blk_rq_rl(req);
 
                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));
 
-               blk_free_request(q, req);
-               freed_request(q, flags);
+               blk_free_request(rl, req);
+               freed_request(rl, flags);
+               blk_put_rl(rl);
        }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1468,7 +1486,7 @@ get_rq:
         * Grab a free request. This might sleep but cannot fail.
         * Returns with the queue unlocked.
         */
-       req = get_request_wait(q, rw_flags, bio);
+       req = get_request(q, rw_flags, bio, GFP_NOIO);
        if (unlikely(!req)) {
                bio_endio(bio, -ENODEV);        /* @q is dead */
                goto out_unlock;
@@ -1689,6 +1707,14 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
+       /*
+        * Various block parts want %current->io_context and lazy ioc
+        * allocation ends up trading a lot of pain for a small amount of
+        * memory.  Just allocate it upfront.  This may fail and the block
+        * layer knows how to live with it.
+        */
+       create_io_context(GFP_ATOMIC, q->node);
+
        if (blk_throtl_bio(q, bio))
                return false;   /* throttled, will be resubmitted later */
 
@@ -2883,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 
 }
 
-static void flush_plug_callbacks(struct blk_plug *plug)
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
 {
        LIST_HEAD(callbacks);
 
-       if (list_empty(&plug->cb_list))
-               return;
-
-       list_splice_init(&plug->cb_list, &callbacks);
+       while (!list_empty(&plug->cb_list)) {
+               list_splice_init(&plug->cb_list, &callbacks);
 
-       while (!list_empty(&callbacks)) {
-               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+               while (!list_empty(&callbacks)) {
+                       struct blk_plug_cb *cb = list_first_entry(&callbacks,
                                                          struct blk_plug_cb,
                                                          list);
-               list_del(&cb->list);
-               cb->callback(cb);
+                       list_del(&cb->list);
+                       cb->callback(cb, from_schedule);
+               }
+       }
+}
+
+struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
+                                     int size)
+{
+       struct blk_plug *plug = current->plug;
+       struct blk_plug_cb *cb;
+
+       if (!plug)
+               return NULL;
+
+       list_for_each_entry(cb, &plug->cb_list, list)
+               if (cb->callback == unplug && cb->data == data)
+                       return cb;
+
+       /* Not currently on the callback list */
+       BUG_ON(size < sizeof(*cb));
+       cb = kzalloc(size, GFP_ATOMIC);
+       if (cb) {
+               cb->data = data;
+               cb->callback = unplug;
+               list_add(&cb->list, &plug->cb_list);
        }
+       return cb;
 }
+EXPORT_SYMBOL(blk_check_plugged);
 
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
@@ -2911,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
-       flush_plug_callbacks(plug);
+       flush_plug_callbacks(plug, from_schedule);
        if (list_empty(&plug->list))
                return;
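
blk_check_plugged() above lets a stacking driver batch work under the caller's blk_plug without owning a request_queue (md is the intended user).  A hedged sketch of the expected shape, with every demo_* name invented:

/* Sketch only -- assumes <linux/blkdev.h>, <linux/bio.h> and <linux/slab.h>.
 * The private struct embeds blk_plug_cb first so container_of() works and so
 * the size passed to blk_check_plugged() covers the whole allocation. */
struct demo_plug_cb {
	struct blk_plug_cb cb;
	struct bio_list pending;	/* kzalloc'd zeroes == bio_list_init() */
};

static void demo_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct demo_plug_cb *dcb = container_of(cb, struct demo_plug_cb, cb);
	struct bio *bio;

	/* a real driver would punt to a workqueue when from_schedule is
	 * true; issuing inline keeps the sketch short */
	while ((bio = bio_list_pop(&dcb->pending)))
		generic_make_request(bio);
	kfree(cb);			/* allocated by blk_check_plugged() */
}

static void demo_queue_bio(struct bio *bio)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(demo_unplug, NULL, sizeof(struct demo_plug_cb));
	if (!cb) {
		generic_make_request(bio);	/* no plug in effect */
		return;
	}
	bio_list_add(&container_of(cb, struct demo_plug_cb, cb)->pending, bio);
}
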
 
index 1e2d53b04858fc2d107aa8c5ead7478d53575fba..893b8007c657e8bd0ca93d5ba61e9e7d02aa892b 100644 (file)
@@ -235,6 +235,7 @@ void ioc_clear_queue(struct request_queue *q)
 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
        struct io_context *ioc;
+       int ret;
 
        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
@@ -262,9 +263,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);
+
+       ret = task->io_context ? 0 : -EBUSY;
+
        task_unlock(task);
 
-       return 0;
+       return ret;
 }
 
 /**
index aa41b47c22d2e89525a5bd3cfb9501e67634bff7..9628b291f96057a42cbf6a5492bd7480fe7e93da 100644 (file)
@@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
 static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
-       struct request_list *rl = &q->rq;
+       struct request_list *rl;
        unsigned long nr;
        int ret;
 
@@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
+       /* congestion isn't cgroup aware and follows root blkcg for now */
+       rl = &q->root_rl;
+
        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
@@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-       if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_SYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_SYNC);
-               wake_up(&rl->wait[BLK_RW_SYNC]);
+       blk_queue_for_each_rl(rl, q) {
+               if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_SYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_SYNC);
+                       wake_up(&rl->wait[BLK_RW_SYNC]);
+               }
+
+               if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_ASYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_ASYNC);
+                       wake_up(&rl->wait[BLK_RW_ASYNC]);
+               }
        }
 
-       if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_ASYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_ASYNC);
-               wake_up(&rl->wait[BLK_RW_ASYNC]);
-       }
        spin_unlock_irq(q->queue_lock);
        return ret;
 }
@@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj)
 {
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
-       struct request_list *rl = &q->rq;
 
        blk_sync_queue(q);
 
@@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj)
                elevator_exit(q->elevator);
        }
 
-       if (rl->rq_pool)
-               mempool_destroy(rl->rq_pool);
+       blk_exit_rl(&q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
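
blk_set_rl_full(), blk_clear_rl_full() and blk_rl_full(), used here and in __get_request() above, are not part of the hunks shown; they replace the old per-queue blk_set_queue_full() helpers and live next to struct request_list in include/linux/blkdev.h.  The following is only an assumed sketch of their likely shape -- the flag names and the rl->flags field are guesses, not copied from the header:

/* Assumed sketch only -- see include/linux/blkdev.h for the real macros. */
#define BLK_RL_SYNCFULL		(1U << 0)	/* assumed flag name */
#define BLK_RL_ASYNCFULL	(1U << 1)	/* assumed flag name */

#define blk_rl_full(rl, sync) \
	((rl)->flags & ((sync) ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL))
#define blk_set_rl_full(rl, sync) \
	((rl)->flags |= ((sync) ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL))
#define blk_clear_rl_full(rl, sync) \
	((rl)->flags &= ~((sync) ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL))
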
index 5b0659512047208efdcc3db7d714bc72dfd456f5..e287c19908c8a31d3c4d29b1586921066032afa6 100644 (file)
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
                goto out;
        }
 
-       /* bio_associate_current() needs ioc, try creating */
-       create_io_context(GFP_ATOMIC, q->node);
-
        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
index 780354888958cd7ea03aef2c1654b325bc9fb24e..6e4744cbfb56b4ca0d99062a0d9b0437c894016d 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 85f6ae42f7d3f698e9e82c75064f428065953e70..a134231fd22a31acad73cf58815060f7ae81d708 100644 (file)
@@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+               gfp_t gfp_mask);
+void blk_exit_rl(struct request_list *rl);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
index 7ad49c88f6b197a04c66e05aab9facacd2781af4..deee61fbb7419005886234b47a68b73b5833d20e 100644 (file)
@@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
        return 0;
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
-
-/**
- * bsg_remove_queue - Deletes the bsg dev from the q
- * @q: the request_queue that is to be torn down.
- *
- * Notes:
- *   Before unregistering the queue empty any requests that are blocked
- */
-void bsg_remove_queue(struct request_queue *q)
-{
-       struct request *req; /* block request */
-       int counts; /* totals for request_list count and starved */
-
-       if (!q)
-               return;
-
-       /* Stop taking in new requests */
-       spin_lock_irq(q->queue_lock);
-       blk_stop_queue(q);
-
-       /* drain all requests in the queue */
-       while (1) {
-               /* need the lock to fetch a request
-                * this may fetch the same reqeust as the previous pass
-                */
-               req = blk_fetch_request(q);
-               /* save requests in use and starved */
-               counts = q->rq.count[0] + q->rq.count[1] +
-                        q->rq.starved[0] + q->rq.starved[1];
-               spin_unlock_irq(q->queue_lock);
-               /* any requests still outstanding? */
-               if (counts == 0)
-                       break;
-
-               /* This may be the same req as the previous iteration,
-                * always send the blk_end_request_all after a prefetch.
-                * It is not okay to not end the request because the
-                * prefetch started the request.
-                */
-               if (req) {
-                       /* return -ENXIO to indicate that this queue is
-                        * going away
-                        */
-                       req->errors = -ENXIO;
-                       blk_end_request_all(req, -ENXIO);
-               }
-
-               msleep(200); /* allow bsg to possibly finish */
-               spin_lock_irq(q->queue_lock);
-       }
-       bsg_unregister_queue(q);
-}
-EXPORT_SYMBOL_GPL(bsg_remove_queue);
index 673c977cc2bfa238e0fe0efa6dddac5193fbccd8..fb52df9744f5fe411908162fbb02257ca0bc097a 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
index 260fa80ef5750f80f4ebc1415d06abc992e18b8a..9a87daa6f4fbd10202ea9d47ae1a549c806a6fb4 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index 47768ff87343d27d2cc5228aa6e02f6cfdff20a0..80998958cf45381f4e1ef6f585b9467b31d1e4b3 100644 (file)
@@ -208,7 +208,7 @@ config ACPI_IPMI
 
 config ACPI_HOTPLUG_CPU
        bool
-       depends on ACPI_PROCESSOR && HOTPLUG_CPU
+       depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
        select ACPI_CONTAINER
        default y
 
index a43fa1a57d57f45616c9d70c362df15d3b71c8e5..1502c50273b5431f95e04f9be7de11356d1a8f98 100644 (file)
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;
 
-       mutex_lock(&isolated_cpus_lock);
+       mutex_lock(&round_robin_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
-               mutex_unlock(&isolated_cpus_lock);
+               mutex_unlock(&round_robin_lock);
                return;
        }
        for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
-       mutex_unlock(&isolated_cpus_lock);
+       mutex_unlock(&round_robin_lock);
 
        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
index 0ed85cac32314f956de3092f7087e3ab011b5ba7..615996a36bedcef10f63bc929b21b3861c9781c6 100644 (file)
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
                return_ACPI_STATUS(status);
        }
 
-       if (sleep_state != ACPI_STATE_S5) {
-               /*
-                * Disable BM arbitration. This feature is contained within an
-                * optional register (PM2 Control), so ignore a BAD_ADDRESS
-                * exception.
-                */
-               status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-               if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-                       return_ACPI_STATUS(status);
-               }
-       }
-
        /*
         * 1) Disable/Clear all GPEs
         * 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
                                    [ACPI_EVENT_POWER_BUTTON].
                                    status_register_id, ACPI_CLEAR_STATUS);
 
-       /*
-        * Enable BM arbitration. This feature is contained within an
-        * optional register (PM2 Control), so ignore a BAD_ADDRESS
-        * exception.
-        */
-       status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
-       if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-               return_ACPI_STATUS(status);
-       }
-
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(status);
 }
index 23ce096864186a1cfd6ced9e57fd17d208646004..fe6626035495bb398fbf2d7fc7097d195a916d29 100644 (file)
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
                        /* Create the new outer package and populate it */
 
                        status =
-                           acpi_ns_wrap_with_package(data, *elements,
+                           acpi_ns_wrap_with_package(data, return_object,
                                                      return_object_ptr);
                        if (ACPI_FAILURE(status)) {
                                return (status);
index 5577762daee1d7d22a8dbf49a27cf02ead2e54a4..6686b1eaf13e5fdd7e9ce6813717d7fcbbf5c2be 100644 (file)
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               return acpi_os_map_generic_address(&entry->register_region);
+               return apei_map_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               acpi_os_unmap_generic_address(&entry->register_region);
+               apei_unmap_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        return 0;
 }
 
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+       int rc;
+       u32 access_bit_width;
+       u64 address;
+
+       rc = apei_check_gar(reg, &address, &access_bit_width);
+       if (rc)
+               return rc;
+       return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
index cca240a33038fe148ca5e9324214ba988c62c6c2..f220d642136ed94a1844411ee532e777be6f847e 100644 (file)
@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H
 
 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
 
 struct apei_exec_context;
 
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP       1
 
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+       acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);
 
index 9b3cac0abecc33c672c884ece470dfb494d1836c..1599566ed1fe077113b10200d22d0db097fd25fd 100644 (file)
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        if (!ghes)
                return ERR_PTR(-ENOMEM);
        ghes->generic = generic;
-       rc = acpi_os_map_generic_address(&generic->error_status_address);
+       rc = apei_map_generic_address(&generic->error_status_address);
        if (rc)
                goto err_free;
        error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        return ghes;
 
 err_unmap:
-       acpi_os_unmap_generic_address(&generic->error_status_address);
+       apei_unmap_generic_address(&generic->error_status_address);
 err_free:
        kfree(ghes);
        return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
        kfree(ghes->estatus);
-       acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+       apei_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
index 86933ca8b4724372505530de753332907f488ebe..7dd3f9fb9f3f21b808e30b8aa78f65fce7b2275a 100644 (file)
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
 
 static void acpi_battery_refresh(struct acpi_battery *battery)
 {
+       int power_unit;
+
        if (!battery->bat.dev)
                return;
 
+       power_unit = battery->power_unit;
+
        acpi_battery_get_info(battery);
-       /* The battery may have changed its reporting units. */
+
+       if (power_unit == battery->power_unit)
+               return;
+
+       /* The battery has changed its reporting units. */
        sysfs_remove_battery(battery);
        sysfs_add_battery(battery);
 }
index 3188da3df8da945f1c4d5150e0254f592ec6c291..adceafda9c171987a9b0f7dff625e34f53a68c30 100644 (file)
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
                                  Power Management
    -------------------------------------------------------------------------- */
 
+static const char *state_string(int state)
+{
+       switch (state) {
+       case ACPI_STATE_D0:
+               return "D0";
+       case ACPI_STATE_D1:
+               return "D1";
+       case ACPI_STATE_D2:
+               return "D2";
+       case ACPI_STATE_D3_HOT:
+               return "D3hot";
+       case ACPI_STATE_D3_COLD:
+               return "D3";
+       default:
+               return "(unknown)";
+       }
+}
+
 static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
-       int result = 0;
-       acpi_status status = 0;
-       unsigned long long psc = 0;
+       int result = ACPI_STATE_UNKNOWN;
 
        if (!device || !state)
                return -EINVAL;
 
-       *state = ACPI_STATE_UNKNOWN;
-
-       if (device->flags.power_manageable) {
-               /*
-                * Get the device's power state either directly (via _PSC) or
-                * indirectly (via power resources).
-                */
-               if (device->power.flags.power_resources) {
-                       result = acpi_power_get_inferred_state(device, state);
-                       if (result)
-                               return result;
-               } else if (device->power.flags.explicit_get) {
-                       status = acpi_evaluate_integer(device->handle, "_PSC",
-                                                      NULL, &psc);
-                       if (ACPI_FAILURE(status))
-                               return -ENODEV;
-                       *state = (int)psc;
-               }
-       } else {
+       if (!device->flags.power_manageable) {
                /* TBD: Non-recursive algorithm for walking up hierarchy. */
                *state = device->parent ?
                        device->parent->power.state : ACPI_STATE_D0;
+               goto out;
+       }
+
+       /*
+        * Get the device's power state either directly (via _PSC) or
+        * indirectly (via power resources).
+        */
+       if (device->power.flags.explicit_get) {
+               unsigned long long psc;
+               acpi_status status = acpi_evaluate_integer(device->handle,
+                                                          "_PSC", NULL, &psc);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               result = psc;
+       }
+       /* The test below covers ACPI_STATE_UNKNOWN too. */
+       if (result <= ACPI_STATE_D2) {
+               ; /* Do nothing. */
+       } else if (device->power.flags.power_resources) {
+               int error = acpi_power_get_inferred_state(device, &result);
+               if (error)
+                       return error;
+       } else if (result == ACPI_STATE_D3_HOT) {
+               result = ACPI_STATE_D3;
        }
+       *state = result;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-                         device->pnp.bus_id, *state));
+ out:
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+                         device->pnp.bus_id, state_string(*state)));
 
        return 0;
 }
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
        /* Make sure this is a valid target state */
 
        if (state == device->power.state) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
-                                 state));
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+                                 state_string(state)));
                return 0;
        }
 
        if (!device->power.states[state].flags.valid) {
-               printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+               printk(KERN_WARNING PREFIX "Device does not support %s\n",
+                      state_string(state));
                return -ENODEV;
        }
        if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
       end:
        if (result)
                printk(KERN_WARNING PREFIX
-                             "Device [%s] failed to transition to D%d\n",
-                             device->pnp.bus_id, state);
+                             "Device [%s] failed to transition to %s\n",
+                             device->pnp.bus_id, state_string(state));
        else {
                device->power.state = state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Device [%s] transitioned to D%d\n",
-                                 device->pnp.bus_id, state));
+                                 "Device [%s] transitioned to %s\n",
+                                 device->pnp.bus_id, state_string(state)));
        }
 
        return result;
index 0500f719f63e2aa3d7e8c711e809f84961eb8f4d..dd6d6a3c6780d7e787c1c3427b808618d8c64fa4 100644 (file)
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
         * We know a device's inferred power state when all the resources
         * required for a given D-state are 'on'.
         */
-       for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+       for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
                list = &device->power.states[i].resources;
                if (list->count < 1)
                        continue;
index c850de4c9a146883a7d91f16c8cd8ddc3b694940..eff722278ff539535bec85d5f5f6f4f2b04b79bf 100644 (file)
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always return 0 for CPU0's handle.
+                * Ignores apic_id and always returns 0 for the processor
+                * handle with acpi id 0 if nr_cpu_ids is 1.
+                * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
-               if (acpi_id == 0)
+               if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
index f3decb30223fd1b376775310ae25dec63424f920..47a8caa89dbe58fc00bafa17bc75e2dba079eb8a 100644 (file)
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+       if (acpi_idle_suspend == 1)
+               return 0;
+
        acpi_idle_bm_rld_save();
+       acpi_idle_suspend = 1;
        return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+       if (acpi_idle_suspend == 0)
+               return 0;
+
        acpi_idle_bm_rld_restore();
+       acpi_idle_suspend = 0;
        return 0;
 }
 
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        lapic_timer_state_broadcast(pr, cx, 1);
        kt1 = ktime_get_real();
        acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                                                drv, drv->safe_state_index);
                } else {
                        local_irq_disable();
-                       acpi_safe_halt();
+                       if (!acpi_idle_suspend)
+                               acpi_safe_halt();
                        local_irq_enable();
-                       return -EINVAL;
+                       return -EBUSY;
                }
        }
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
index 0af48a8554cd786620266c5449fb82798f034de1..a093dc163a42a8b677d9ccb7b27a61d0427c3f49 100644 (file)
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;
+       int last_invalid = -1;
 
 
        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                    ((u32)(px->core_frequency * 1000) !=
                     (px->core_frequency * 1000))) {
                        printk(KERN_ERR FW_BUG PREFIX
-                              "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
-                              px->core_frequency);
-                       result = -EFAULT;
-                       kfree(pr->performance->states);
-                       goto end;
+                              "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+                              pr->id, px->core_frequency);
+                       if (last_invalid == -1)
+                               last_invalid = i;
+               } else {
+                       if (last_invalid != -1) {
+                               /*
+                                * Copy this valid entry over last_invalid entry
+                                */
+                               memcpy(&(pr->performance->states[last_invalid]),
+                                      px, sizeof(struct acpi_processor_px));
+                               ++last_invalid;
+                       }
                }
        }
 
+       if (last_invalid == 0) {
+               printk(KERN_ERR FW_BUG PREFIX
+                      "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+               result = -EFAULT;
+               kfree(pr->performance->states);
+               pr->performance->states = NULL;
+       }
+
+       if (last_invalid > 0)
+               pr->performance->state_count = last_invalid;
+
       end:
        kfree(buffer.pointer);
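
The loop above compacts the _PSS table in place by shifting later valid entries down over the invalid ones; a small self-contained illustration of the same index bookkeeping (plain userspace C, nothing here is kernel code):

#include <stdio.h>

/* a negative value stands for an invalid _PSS entry */
static int compact_valid(int *v, int n)
{
	int i, last_invalid = -1;

	for (i = 0; i < n; i++) {
		if (v[i] < 0) {
			if (last_invalid == -1)
				last_invalid = i;	/* first invalid slot */
		} else if (last_invalid != -1) {
			v[last_invalid++] = v[i];	/* copy valid entry down */
		}
	}
	if (last_invalid == -1)
		return n;		/* nothing was invalid */
	return last_invalid;		/* 0 means no valid entries at all */
}

int main(void)
{
	int mhz[] = { 2000, -1, -1, 1500, 1000 };
	int i, count = compact_valid(mhz, 5);

	printf("%d valid states:", count);
	for (i = 0; i < count; i++)
		printf(" %d", mhz[i]);
	printf("\n");	/* prints "3 valid states: 2000 1500 1000" */
	return 0;
}
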
 
index 85cbfdccc97cbfc0dcdcf7c236e57c8c003b0700..c8a1f3b68110b86dd2c6b0ddddc91ee2085e93b1 100644 (file)
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
                                                ACPI_BUS_TYPE_POWER_BUTTON,
                                                ACPI_STA_DEFAULT,
                                                &ops);
+               device_init_wakeup(&device->dev, true);
        }
 
        if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
index ebaa04593236e5290355e392cfd734618f0c9538..88561029cca83915bffd7cc1d59e03f28d1fbfb4 100644 (file)
@@ -25,8 +25,6 @@
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
-#include <asm/realmode.h>
-
 #include "internal.h"
 #include "sleep.h"
 
@@ -59,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
 static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -93,13 +92,11 @@ static struct notifier_block tts_notifier = {
 static int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
-       unsigned long wakeup_pa = real_mode_header->wakeup_start;
        /* do we have a wakeup address for S2 and S3? */
        if (acpi_state == ACPI_STATE_S3) {
-               if (!wakeup_pa)
+               if (!acpi_wakeup_address)
                        return -EFAULT;
-               acpi_set_firmware_waking_vector(
-                               (acpi_physical_address)wakeup_pa);
+               acpi_set_firmware_waking_vector(acpi_wakeup_address);
 
        }
        ACPI_FLUSH_CPU_CACHE();
@@ -188,6 +185,14 @@ static int acpi_pm_prepare(void)
        return error;
 }
 
+static int find_powerf_dev(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       const char *hid = acpi_device_hid(device);
+
+       return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
 /**
  *     acpi_pm_finish - Instruct the platform to leave a sleep state.
  *
@@ -196,6 +201,7 @@ static int acpi_pm_prepare(void)
  */
 static void acpi_pm_finish(void)
 {
+       struct device *pwr_btn_dev;
        u32 acpi_state = acpi_target_sleep_state;
 
        acpi_ec_unblock_transactions();
@@ -213,6 +219,23 @@ static void acpi_pm_finish(void)
        acpi_set_firmware_waking_vector((acpi_physical_address) 0);
 
        acpi_target_sleep_state = ACPI_STATE_S0;
+
+       /* If we were woken with the fixed power button, provide a small
+        * hint to userspace in the form of a wakeup event on the fixed power
+        * button device (if it can be found).
+        *
+        * We delay the event generation until now, as the PM layer requires
+        * timekeeping to be running before we generate events. */
+       if (!pwr_btn_event_pending)
+               return;
+
+       pwr_btn_event_pending = false;
+       pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+                                     find_powerf_dev);
+       if (pwr_btn_dev) {
+               pm_wakeup_event(pwr_btn_dev, 0);
+               put_device(pwr_btn_dev);
+       }
 }
 
 /**
@@ -302,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
        /* ACPI 3.0 specs (P62) says that it's the responsibility
         * of the OSPM to clear the status bit [ implying that the
         * POWER_BUTTON event should not reach userspace ]
+        *
+        * However, we do generate a small hint for userspace in the form of
+        * a wakeup event. We flag this condition for now and generate the
+        * event later, as we're currently too early in resume to be able to
+        * generate wakeup events.
         */
-       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-               acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+               acpi_event_status pwr_btn_status;
+
+               acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+               if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+                       acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+                       /* Flag for later */
+                       pwr_btn_event_pending = true;
+               }
+       }
 
        /*
         * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -734,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) &&
-            adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+           (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+            adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
                acpi_status status;
 
                acpi_method[3] = 'W';
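
The sleep.c hunks above split fixed power-button reporting into two phases: acpi_suspend_enter() only latches the fact that the power button woke the machine (it runs too early for wakeup events), and acpi_pm_finish() delivers the event once timekeeping is back. A minimal standalone C sketch of that latch-now/report-later pattern; the names here are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Two-phase reporting model (names are illustrative): the wakeup
 * condition is latched while event delivery is not yet possible,
 * and the event is emitted later from a context where it is. */
static bool pwr_btn_event_pending;

/* phase 1: runs very early in resume, interrupts still off */
static void suspend_enter(bool button_status_set)
{
        if (button_status_set)
                pwr_btn_event_pending = true;   /* clear HW bit, remember it */
}

/* phase 2: runs once timekeeping/event infrastructure is back */
static void pm_finish(void)
{
        if (!pwr_btn_event_pending)
                return;
        pwr_btn_event_pending = false;
        printf("wakeup event reported to userspace\n");
}

int main(void)
{
        suspend_enter(true);    /* woken by the fixed power button */
        pm_finish();            /* event delivered now, not earlier */
        return 0;
}
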
index 9f66181c814e78e2fc40c88ba743b5aa062e50e9..240a24400976929c663b9b3f8ce8bafa36435d5f 100644 (file)
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
        int result = 0;
 
-       if (!strncmp(val, "enable", strlen("enable") - 1)) {
+       if (!strncmp(val, "enable", strlen("enable"))) {
                result = acpi_debug_trace(trace_method_name, trace_debug_level,
                                          trace_debug_layer, 0);
                if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
                goto exit;
        }
 
-       if (!strncmp(val, "disable", strlen("disable") - 1)) {
+       if (!strncmp(val, "disable", strlen("disable"))) {
                int name = 0;
                result = acpi_debug_trace((char *)&name, trace_debug_level,
                                          trace_debug_layer, 0);
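
The two hunks above fix an off-by-one length passed to strncmp(): comparing only strlen("enable") - 1 bytes matches just the prefix "enabl", so a mistyped value could be accepted. A small standalone demo of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *val = "enablx";

        /* Old check: compares only the first 5 bytes ("enabl"),
         * so a typo like "enablx" is accepted. */
        printf("old: %s\n",
               !strncmp(val, "enable", strlen("enable") - 1) ? "match" : "no match");

        /* Fixed check: compares all 6 bytes of "enable". */
        printf("new: %s\n",
               !strncmp(val, "enable", strlen("enable")) ? "match" : "no match");
        return 0;
}
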
index 9577b6fa26507cad1db1f2ddf7d6f114abfdf513..1e0a9e17c31d805a201e8dbcbf7933efe8eec111 100644 (file)
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
        union acpi_object arg0 = { ACPI_TYPE_INTEGER };
        struct acpi_object_list args = { 1, &arg0 };
 
+       if (!video->cap._DOS)
+               return 0;
 
        if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
                return -EINVAL;
@@ -1687,10 +1689,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
        set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
        set_bit(KEY_DISPLAY_OFF, input->keybit);
 
-       error = input_register_device(input);
-       if (error)
-               goto err_stop_video;
-
        printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
               ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
               video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
        video->pm_nb.priority = 0;
        error = register_pm_notifier(&video->pm_nb);
        if (error)
-               goto err_unregister_input_dev;
+               goto err_stop_video;
+
+       error = input_register_device(input);
+       if (error)
+               goto err_unregister_pm_notifier;
 
        return 0;
 
- err_unregister_input_dev:
-       input_unregister_device(input);
+ err_unregister_pm_notifier:
+       unregister_pm_notifier(&video->pm_nb);
  err_stop_video:
        acpi_video_bus_stop_devices(video);
  err_free_input_dev:
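
The reordering above restores the usual mirrored error-unwind convention: register the PM notifier before the input device, and make each error label release exactly what has already been set up, in reverse order of acquisition. A generic standalone sketch of that goto-unwind pattern, with stub step functions standing in for the real registrations:

#include <stdio.h>

static int step_a(void) { puts("a acquired"); return 0; }
static int step_b(void) { puts("b acquired"); return 0; }
static int step_c(void) { puts("c failed");   return -1; }
static void undo_step_b(void) { puts("b released"); }
static void undo_step_a(void) { puts("a released"); }

/* Each error label undoes only the steps that already succeeded,
 * in reverse order, so no resource is leaked or double-freed. */
static int setup(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto undo_a;
        err = step_c();
        if (err)
                goto undo_b;
        return 0;

undo_b:
        undo_step_b();
undo_a:
        undo_step_a();
out:
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}
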
@@ -1743,9 +1745,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
        return 0;
 }
 
+static int __init is_i740(struct pci_dev *dev)
+{
+       if (dev->device == 0x00D1)
+               return 1;
+       if (dev->device == 0x7000)
+               return 1;
+       return 0;
+}
+
 static int __init intel_opregion_present(void)
 {
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+       int opregion = 0;
        struct pci_dev *dev = NULL;
        u32 address;
 
@@ -1754,13 +1765,15 @@ static int __init intel_opregion_present(void)
                        continue;
                if (dev->vendor != PCI_VENDOR_ID_INTEL)
                        continue;
+               /* We don't want to poke around undefined i740 registers */
+               if (is_i740(dev))
+                       continue;
                pci_read_config_dword(dev, 0xfc, &address);
                if (!address)
                        continue;
-               return 1;
+               opregion = 1;
        }
-#endif
-       return 0;
+       return opregion;
 }
 
 int acpi_video_register(void)
index 3239517f4d902952654212f0a462d646fbd94608..ac6a5beb28f3b99e090f358257ef161476113c32 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller source file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
 
 module_platform_driver(arasan_cf_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
index e8cd652d20178c7c3ff8465aac997f1f65e20f63..98510931c8153110ee0617457fb63cafe652696a 100644 (file)
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
                        } else if (skb && card->using_dma) {
                                SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
                                                                       skb->len, PCI_DMA_TODEVICE);
+                               card->tx_skb[port] = skb;
                                iowrite32(SKB_CB(skb)->dma_addr,
                                          card->config_regs + TX_DMA_ADDR(port));
                        }
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
                db_fpga_upgrade = db_firmware_upgrade = 0;
        }
 
-       if (card->fpga_version >= DMA_SUPPORTED){
+       if (card->fpga_version >= DMA_SUPPORTED) {
+               pci_set_master(dev);
                card->using_dma = 1;
        } else {
                card->using_dma = 0;
index 1b1cbb571d38d72acce854b8fc6437b5323783c9..4b01ab3d2c249328c810aaa03aec450d7f8d2822 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
+#include <scsi/scsi_scan.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -100,7 +101,7 @@ static void driver_deferred_probe_add(struct device *dev)
        mutex_lock(&deferred_probe_mutex);
        if (list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Added to deferred list\n");
-               list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+               list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
        }
        mutex_unlock(&deferred_probe_mutex);
 }
@@ -332,6 +333,7 @@ void wait_for_device_probe(void)
        /* wait for the known devices to complete their probing */
        wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
        async_synchronize_full();
+       scsi_complete_async_scans();
 }
 EXPORT_SYMBOL_GPL(wait_for_device_probe);
 
index e0fb5b0435a350abaa5a23c30e76f4e5d691001a..9cb845e49334c78d27a958066cf33dfc665c4ac1 100644 (file)
@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        dpm_wait_for_children(dev, async);
 
        if (async_error)
-               return 0;
+               goto Complete;
 
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
-               return 0;
+               goto Complete;
        }
 
        device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        if (error) {
index 5f6b2478bf1759717e9c85f1958207364f3f7520..fa6bf5279d28465f095c0829381854a718313e5e 100644 (file)
@@ -42,7 +42,7 @@ static int regmap_i2c_gather_write(void *context,
        /* If the I2C controller can't do a gather tell the core, it
         * will substitute in a linear write for us.
         */
-       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
                return -ENOTSUPP;
 
        xfer[0].addr = i2c->addr;
index 0bcda488f11cd45e6252e113ffd025b058309e99..c89aa01fb1de8262838998dc68f778d5d7473d91 100644 (file)
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
                map->lock = regmap_lock_mutex;
                map->unlock = regmap_unlock_mutex;
        }
-       map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
-       map->format.buf_size += map->format.pad_bytes;
+       map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+                       config->val_bits + config->pad_bits, 8);
        map->reg_shift = config->pad_bits % 8;
        if (config->reg_stride)
                map->reg_stride = config->reg_stride;
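
The new buf_size computation above rounds up once over the total bit count instead of truncating each component separately, which could under-allocate when reg_bits, val_bits, or pad_bits are not byte multiples. A quick standalone calculation with hypothetical bit widths showing the difference:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* hypothetical map: 10 register bits, 6 pad bits, 8 value bits */
        int reg_bits = 10, pad_bits = 6, val_bits = 8;

        /* old computation: two truncating divisions drop the carry bits */
        int old_size = (reg_bits + val_bits) / 8 + pad_bits / 8;

        /* new computation: round up once over the whole bit count */
        int new_size = DIV_ROUND_UP(reg_bits + val_bits + pad_bits, 8);

        printf("old buf_size = %d bytes, new buf_size = %d bytes\n",
               old_size, new_size);     /* prints 2 vs 3 */
        return 0;
}
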
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
 
        ret = regcache_init(map, config);
        if (ret < 0)
-               goto err_free_workbuf;
+               goto err_debugfs;
 
        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
 
 err_cache:
        regcache_exit(map);
-err_free_workbuf:
+err_debugfs:
+       regmap_debugfs_exit(map);
        kfree(map->work_buf);
 err_map:
        kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
 
 /**
  * regmap_exit(): Free a previously allocated register map
index ba29b2e73d48936ab9a93a028abd0b0d9cd29691..72b5e7280d14792e6d83f3a59e4d4793d20fe28c 100644 (file)
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
        return &soc_dev->dev;
 }
 
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
                                  struct attribute *attr,
                                  int index)
 {
index a058842f14fdf54b92b495f01c34a02ed2b74bcc..61ce4054b3c33b3ed70e0de39d0e19a9f5167cf0 100644 (file)
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
                bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
                break;
        case 0x4331:
-               /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+       case 43431:
+               /* Ext PA lines must be enabled for tx on BCM4331 */
+               bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
                break;
        case 43224:
                if (bus->chipinfo.rev == 0) {
index 9a96f14c8f474fba41442bbdcbe3bc910a31efc6..c32ebd537abe3a3e5f8f5e777f119c8768c9ea38 100644 (file)
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
                          bool enable)
 {
-       struct pci_dev *pdev = pc->core->bus->host_pci;
+       struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;
 
-       if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+       if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host-bus. So the IRQs are
                 * not routed through the PCI core.
                 * So we must not enable routing through the PCI core. */
                goto out;
        }
 
+       pdev = pc->core->bus->host_pci;
+
        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
                goto out;
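
The hunk above moves the host_pci dereference below the NULL/hosttype check, since initializing pdev in the declaration would already dereference a NULL pc. A minimal standalone sketch of the check-before-dereference ordering, using made-up struct names:

#include <stdio.h>
#include <stddef.h>

struct bus  { int host_irq; };
struct core { struct bus *bus; };

/* Pattern from the fix above: reject NULL first, dereference after,
 * never in the variable initializer. */
static int irq_ctl(struct core *pc)
{
        struct bus *b;

        if (!pc)                /* reject NULL first ... */
                return -1;
        b = pc->bus;            /* ... then dereference */
        return b->host_irq;
}

int main(void)
{
        struct bus b = { .host_irq = 42 };
        struct core c = { .bus = &b };

        printf("%d\n", irq_ctl(&c));    /* 42 */
        printf("%d\n", irq_ctl(NULL));  /* -1, no crash */
        return 0;
}
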
index c7f93359acb09affe99a398f45af7974ccdc30e3..f16f42d36071371414574305d6f057f9240f3bfd 100644 (file)
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
        if (!sprom)
                return -ENOMEM;
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
 
        pr_debug("SPROM offset 0x%x\n", offset);
        bcma_sprom_read(bus, offset, sprom);
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
        err = bcma_sprom_valid(sprom);
index e54e31b02b88eb6e927072745f345871a8d96203..3fbef018ce555fe47a716a0de01c2df837a2cc9e 100644 (file)
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                + mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-               drbd_chk_io_error(mdev, 1, true);
+               drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 
        if (++mdev->al_tr_pos >
            div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
        unsigned int enr, count = 0;
        struct lc_element *e;
 
-       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+       /* this should be an empty REQ_FLUSH */
+       if (size == 0)
+               return 0;
+
+       if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                dev_err(DEV, "sector: %llus, size: %d\n",
                        (unsigned long long)sector, size);
                return 0;
index b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5..ba91b408abad75ce7da0ff173595ce23ee51f635 100644 (file)
@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
 
        if (ctx->error) {
                dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-               drbd_chk_io_error(mdev, 1, true);
+               drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
                err = -EIO; /* ctx->error ? */
        }
 
@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
        wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 
        if (ctx->error)
-               drbd_chk_io_error(mdev, 1, true);
+               drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
                /* that should force detach, so the in memory bitmap will be
                 * gone in a moment as well. */
 
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
index 02f013a073a75b66fe73c8658f1cde96ea7102a7..b2ca143d0053d75487e3a083a4271b9e5d0d285e 100644 (file)
@@ -813,7 +813,6 @@ enum {
        SIGNAL_ASENDER,         /* whether asender wants to be interrupted */
        SEND_PING,              /* whether asender should send a ping asap */
 
-       UNPLUG_QUEUED,          /* only relevant with kernel 2.4 */
        UNPLUG_REMOTE,          /* sending a "UnplugRemote" could help */
        MD_DIRTY,               /* current uuids and flags not yet on disk */
        DISCARD_CONCURRENT,     /* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
        CRASHED_PRIMARY,        /* This node was a crashed primary.
                                 * Gets cleared when the state.conn
                                 * goes into C_CONNECTED state. */
-       NO_BARRIER_SUPP,        /* underlying block device doesn't implement barriers */
        CONSIDER_RESYNC,
 
        MD_NO_FUA,              /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
        BITMAP_IO_QUEUED,       /* Started bitmap IO */
        GO_DISKLESS,            /* Disk is being detached, on io-error or admin request. */
        WAS_IO_ERROR,           /* Local disk failed returned IO error */
+       FORCE_DETACH,           /* Force-detach from local disk, aborting any pending local IO */
        RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
        NET_CONGESTED,          /* The data socket is congested */
 
@@ -851,6 +850,13 @@ enum {
        AL_SUSPENDED,           /* Activity logging is currently suspended. */
        AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
        STATE_SENT,             /* Do not change state/UUIDs while this is set */
+
+       CALLBACK_PENDING,       /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+                                * pending, from drbd worker context.
+                                * If set, bdi_write_congested() returns true,
+                                * so shrink_page_list() would not recurse into,
+                                * and potentially deadlock on, this drbd worker.
+                                */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
        int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
        int rs_planed;    /* resync sectors already planned */
        atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
-       int peer_max_bio_size;
-       int local_max_bio_size;
+       unsigned int peer_max_bio_size;
+       unsigned int local_max_bio_size;
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
  * hash table. */
 #define HT_SHIFT 8
 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
-#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12)       /* Works always = 4k */
+#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */
 
-#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
 
 /* Number of elements in the app_reads_hash */
 #define APP_R_HSIZE 15
@@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
        return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
 }
 
+enum drbd_force_detach_flags {
+       DRBD_IO_ERROR,
+       DRBD_META_IO_ERROR,
+       DRBD_FORCE_DETACH,
+};
+
 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+               enum drbd_force_detach_flags forcedetach,
+               const char *where)
 {
        switch (mdev->ldev->dc.on_io_error) {
        case EP_PASS_ON:
-               if (!forcedetach) {
+               if (forcedetach == DRBD_IO_ERROR) {
                        if (__ratelimit(&drbd_ratelimit_state))
                                dev_err(DEV, "Local IO failed in %s.\n", where);
                        if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
        case EP_DETACH:
        case EP_CALL_HELPER:
                set_bit(WAS_IO_ERROR, &mdev->flags);
+               if (forcedetach == DRBD_FORCE_DETACH)
+                       set_bit(FORCE_DETACH, &mdev->flags);
                if (mdev->state.disk > D_FAILED) {
                        _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
                        dev_err(DEV,
@@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
  */
 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
 static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
-       int error, int forcedetach, const char *where)
+       int error, enum drbd_force_detach_flags forcedetach, const char *where)
 {
        if (error) {
                unsigned long flags;
@@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
        int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
 
        D_ASSERT(ap_bio >= 0);
+
+       if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
+               if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+                       drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+       }
+
        /* this currently does wake_up for every dec_ap_bio!
         * maybe rather introduce some type of hysteresis?
         * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
        if (ap_bio < mxb)
                wake_up(&mdev->misc_wait);
-       if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-               if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
-                       drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
-       }
 }
 
 static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
index 920ede2829d6c5e467e177ac43a3e97e9f550aac..2e0e7fc1dbbaf50e24d8a446a4e20aeb2239f5bb 100644 (file)
@@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
+               /* we probably will start a resync soon.
+                * make sure those things are properly reset. */
+               mdev->rs_total = 0;
+               mdev->rs_failed = 0;
+               atomic_set(&mdev->rs_pending_cnt, 0);
+               drbd_rs_cancel_all(mdev);
+
                drbd_send_uuids(mdev);
                drbd_send_state(mdev, ns);
        }
@@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                        eh = mdev->ldev->dc.on_io_error;
                        was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
 
-                       /* Immediately allow completion of all application IO, that waits
-                          for completion from the local disk. */
-                       tl_abort_disk_io(mdev);
+                       if (was_io_error && eh == EP_CALL_HELPER)
+                               drbd_khelper(mdev, "local-io-error");
+
+                       /* Immediately allow completion of all application IO,
+                        * that waits for completion from the local disk,
+                        * if this was a force-detach due to disk_timeout
+                        * or administrator request (drbdsetup detach --force).
+                        * Do NOT abort otherwise.
+                        * Aborting local requests may cause serious problems,
+                        * if requests are completed to upper layers already,
+                        * and then later the already submitted local bio completes.
+                        * This can cause DMA into former bio pages that meanwhile
+                        * have been re-used for other things.
+                        * So aborting local requests may cause crashes,
+                        * or even worse, silent data corruption.
+                        */
+                       if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+                               tl_abort_disk_io(mdev);
 
                        /* current state still has to be D_FAILED,
                         * there is only one way out: to D_DISKLESS,
@@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                        drbd_md_sync(mdev);
                }
                put_ldev(mdev);
-
-               if (was_io_error && eh == EP_CALL_HELPER)
-                       drbd_khelper(mdev, "local-io-error");
        }
 
         /* second half of local IO error, failure to attach,
@@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                                 "ASSERT FAILED: disk is %s while going diskless\n",
                                 drbd_disk_str(mdev->state.disk));
 
-                mdev->rs_total = 0;
-                mdev->rs_failed = 0;
-                atomic_set(&mdev->rs_pending_cnt, 0);
-
                if (ns.conn >= C_CONNECTED)
                        drbd_send_state(mdev, ns);
 
@@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 {
        struct p_sizes p;
        sector_t d_size, u_size;
-       int q_order_type, max_bio_size;
+       int q_order_type;
+       unsigned int max_bio_size;
        int ok;
 
        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
                u_size = mdev->ldev->dc.disk_size;
                q_order_type = drbd_queue_order_type(mdev);
                max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
-               max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
+               max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
                put_ldev(mdev);
        } else {
                d_size = 0;
@@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 
        /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
        if (mdev->agreed_pro_version <= 94)
-               max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+               max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 
        p.d_size = cpu_to_be64(d_size);
        p.u_size = cpu_to_be64(u_size);
@@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
                goto out;
        }
 
+       if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+               r |= (1 << BDI_async_congested);
+               /* Without good local data, we would need to read from remote,
+                * and that would need the worker thread as well, which is
+                * currently blocked waiting for that usermode helper to
+                * finish.
+                */
+               if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+                       r |= (1 << BDI_sync_congested);
+               else
+                       put_ldev(mdev);
+               r &= bdi_bits;
+               reason = 'c';
+               goto out;
+       }
+
        if (get_ldev(mdev)) {
                q = bdev_get_queue(mdev->ldev->backing_bdev);
                r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
        q->backing_dev_info.congested_data = mdev;
 
        blk_queue_make_request(q, drbd_make_request);
+       blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
        /* Setting the max_hw_sectors to an odd value of 8kibyte here
           This triggers a max_bio_size message upon first attach or connect */
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
-               drbd_chk_io_error(mdev, 1, true);
+               drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
        }
 
        /* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
        spin_lock_irq(&mdev->req_lock);
        if (mdev->state.conn < C_CONNECTED) {
-               int peer;
+               unsigned int peer;
                peer = be32_to_cpu(buffer->la_peer_max_bio_size);
-               peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
+               peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
                mdev->peer_max_bio_size = peer;
        }
        spin_unlock_irq(&mdev->req_lock);
index 6d4de6a72e8069018b2b4d050b7ef2aa8b259259..fb9dce8daa2468c76992f4ab609adb471bc42af3 100644 (file)
@@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        int ret;
 
+       if (current == mdev->worker.task)
+               set_bit(CALLBACK_PENDING, &mdev->flags);
+
        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 
        if (get_net_conf(mdev)) {
@@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
 
+       if (current == mdev->worker.task)
+               clear_bit(CALLBACK_PENDING, &mdev->flags);
+
        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;
 
@@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
        struct request_queue * const q = mdev->rq_queue;
-       int max_hw_sectors = max_bio_size >> 9;
-       int max_segments = 0;
+       unsigned int max_hw_sectors = max_bio_size >> 9;
+       unsigned int max_segments = 0;
 
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-       int now, new, local, peer;
+       unsigned int now, new, local, peer;
 
        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                mdev->local_max_bio_size = local;
                put_ldev(mdev);
        }
+       local = min(local, DRBD_MAX_BIO_SIZE);
 
        /* We may ignore peer limits if the peer is modern enough.
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->agreed_pro_version < 94) {
-                       peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                       peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
                        /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                } else if (mdev->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                        peer = DRBD_MAX_BIO_SIZE;
        }
 
-       new = min_t(int, local, peer);
+       new = min(local, peer);
 
        if (mdev->state.role == R_PRIMARY && new < now)
-               dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+               dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 
        if (new != now)
                dev_info(DEV, "max BIO size = %u\n", new);
@@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
+       /* make sure there is no leftover from previous force-detach attempts */
+       clear_bit(FORCE_DETACH, &mdev->flags);
+
+       /* and no leftover from previously aborted resync or verify, either */
+       mdev->rs_total = 0;
+       mdev->rs_failed = 0;
+       atomic_set(&mdev->rs_pending_cnt, 0);
+
        /* allocation not in the IO path, cqueue thread context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
@@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
        }
 
        if (dt.detach_force) {
+               set_bit(FORCE_DETACH, &mdev->flags);
                drbd_force_state(mdev, NS(disk, D_FAILED));
                reply->ret_code = SS_SUCCESS;
                goto out;
@@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
        int retcode;
 
        /* If there is still bitmap IO pending, probably because of a previous
-        * resync just being finished, wait for it before requesting a new resync. */
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
        drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
@@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
        int retcode;
 
        /* If there is still bitmap IO pending, probably because of a previous
-        * resync just being finished, wait for it before requesting a new resync. */
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
        drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
 
index 869bada2ed06838a656d431584afde516a9cd115..5496104f90b9efe295704a271c95b724a8245145 100644 (file)
@@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                    mdev->state.role == R_SECONDARY) {
                        seq_printf(seq, "%2d: cs:Unconfigured\n", i);
                } else {
+                       /* reset mdev->congestion_reason */
+                       bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+
                        seq_printf(seq,
                           "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
                           "    ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
index ea4836e0ae9829e12206e482cc50b70678a3e4aa..c74ca2df7431f13553d366fd371d717b6ed1f56b 100644 (file)
@@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;
 
+       if (page == NULL)
+               return;
+
        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
                i = page_chain_free(page);
        else {
@@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                                     gfp_t gfp_mask) __must_hold(local)
 {
        struct drbd_epoch_entry *e;
-       struct page *page;
+       struct page *page = NULL;
        unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
        if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                return NULL;
        }
 
-       page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
-       if (!page)
-               goto fail;
+       if (data_size) {
+               page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+               if (!page)
+                       goto fail;
+       }
 
        INIT_HLIST_NODE(&e->collision);
        e->epoch = NULL;
@@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 
        data_size -= dgs;
 
-       ERR_IF(data_size == 0) return NULL;
        ERR_IF(data_size &  0x1ff) return NULL;
        ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;
 
@@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
        if (!e)
                return NULL;
 
+       if (!data_size)
+               return e;
+
        ds = data_size;
        page = e->pages;
        page_chain_for_each(page) {
@@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        dp_flags = be32_to_cpu(p->dp_flags);
        rw |= wire_flags_to_bio(mdev, dp_flags);
+       if (e->pages == NULL) {
+               D_ASSERT(e->size == 0);
+               D_ASSERT(dp_flags & DP_FLUSH);
+       }
 
        if (dp_flags & DP_MAY_SET_IN_SYNC)
                e->flags |= EE_MAY_SET_IN_SYNC;
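
The receiver changes above let a zero-size request (an empty REQ_FLUSH) travel through the allocation path without any data pages, and make the free path tolerate a NULL page pointer. A small userspace sketch of the same convention, assuming a simplified buffer type rather than drbd_epoch_entry:

#include <stdio.h>
#include <stdlib.h>

struct buf { void *pages; size_t size; };

/* Allocate a request descriptor; a zero-size request (an empty
 * flush in the driver above) carries no data pages at all. */
static struct buf *buf_alloc(size_t size)
{
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
                return NULL;
        if (size) {
                b->pages = malloc(size);
                if (!b->pages) {
                        free(b);
                        return NULL;
                }
        }
        b->size = size;
        return b;
}

/* The free path must tolerate pages == NULL, like free(NULL). */
static void buf_free(struct buf *b)
{
        if (!b)
                return;
        free(b->pages); /* free(NULL) is a no-op */
        free(b);
}

int main(void)
{
        struct buf *flush = buf_alloc(0);       /* empty flush: no pages */
        struct buf *write = buf_alloc(4096);

        if (!flush || !write)
                return 1;
        printf("flush pages: %p, write pages: %p\n", flush->pages, write->pages);
        buf_free(flush);
        buf_free(write);
        return 0;
}
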
@@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev)
        mdev->ee_hash = NULL;
        mdev->ee_hash_s = 0;
 
-       /* paranoia code */
-       for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
-               if (h->first)
-                       dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
-                               (int)(h - mdev->tl_hash), h->first);
+       /* We may not have had the chance to wait for all locally pending
+        * application requests. The hlist_add_fake() prevents access after
+        * free on master bio completion. */
+       for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
+               struct drbd_request *req;
+               struct hlist_node *pos, *n;
+               hlist_for_each_entry_safe(req, pos, n, h, collision) {
+                       hlist_del_init(&req->collision);
+                       hlist_add_fake(&req->collision);
+               }
+       }
+
        kfree(mdev->tl_hash);
        mdev->tl_hash = NULL;
        mdev->tl_hash_s = 0;
index 9c5c84946b056792fa45d99051bd5c28750db7e1..910335c30927f0429a4c4b0fddcfe74033c45e8d 100644 (file)
@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               __drbd_chk_io_error(mdev, false);
+               __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
                _req_may_be_done_not_susp(req, m);
                break;
 
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
-               __drbd_chk_io_error(mdev, false);
+               __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we can not reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
@@ -1093,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        /*
         * what we "blindly" assume:
         */
-       D_ASSERT(bio->bi_size > 0);
        D_ASSERT((bio->bi_size & 0x1ff) == 0);
 
        /* to make some things easier, force alignment of requests within the
         * granularity of our hash tables */
        s_enr = bio->bi_sector >> HT_SHIFT;
-       e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
+       e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
 
        if (likely(s_enr == e_enr)) {
                do {
@@ -1257,7 +1274,7 @@ void request_timer_fn(unsigned long data)
                 time_after(now, req->start_time + dt) &&
                !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
                dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
-               __drbd_chk_io_error(mdev, 1);
+               __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
        }
        nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
        spin_unlock_irq(&mdev->req_lock);
index 620c70ff223118e6f259a200512203f5010e2cda..6bce2cc179d4112980673b1b9d82ae91d70d390f 100644 (file)
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
        if (list_empty(&mdev->read_ee))
                wake_up(&mdev->ee_wait);
        if (test_bit(__EE_WAS_ERROR, &e->flags))
-               __drbd_chk_io_error(mdev, false);
+               __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
        spin_unlock_irqrestore(&mdev->req_lock, flags);
 
        drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
                : list_empty(&mdev->active_ee);
 
        if (test_bit(__EE_WAS_ERROR, &e->flags))
-               __drbd_chk_io_error(mdev, false);
+               __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
        spin_unlock_irqrestore(&mdev->req_lock, flags);
 
        if (is_syncer_req)
@@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                return;
        }
 
-       if (mdev->state.conn < C_AHEAD) {
-               /* In case a previous resync run was aborted by an IO error/detach on the peer. */
-               drbd_rs_cancel_all(mdev);
-               /* This should be done when we abort the resync. We definitely do not
-                  want to have this for connections going back and forth between
-                  Ahead/Behind and SyncSource/SyncTarget */
-       }
-
        if (side == C_SYNC_TARGET) {
                /* Since application IO was locked out during C_WF_BITMAP_T and
                   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
index 9d6ef68e2f1b4433f705d846bd536a47cdaf92d4..a7d6347aaa7913b2a029014a95a2558d8360597e 100644 (file)
@@ -672,6 +672,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index bbca966f8f66a20b04f62ac01ff41cd5ef18c037..3bba65510d23afdf39070f7d23552e8e15e27af2 100644 (file)
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
        struct gendisk *disk;
        int err;
 
+       err = -ENOMEM;
        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-       if (!lo) {
-               err = -ENOMEM;
+       if (!lo)
                goto out;
-       }
 
-       err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-       if (err < 0)
+       if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
                goto out_free_dev;
 
        if (i >= 0) {
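
The loop_add() change above accounts for idr_pre_get() returning 0 on failure and nonzero on success rather than a negative errno, so the old "< 0" test could never catch a failure. A small standalone illustration, with a hypothetical stub standing in for idr_pre_get():

#include <stdio.h>

/* Stand-in for an allocator with idr_pre_get()-style semantics in
 * this kernel era: 1 on success, 0 on failure, never a negative
 * errno. (Hypothetical stub, not the real function.) */
static int pre_get(int succeed)
{
        return succeed ? 1 : 0;
}

int main(void)
{
        int failing = 0;        /* simulate an allocation failure */

        /* Old, buggy check: 0 is not < 0, so the failure slips through. */
        if (pre_get(failing) < 0)
                puts("old check: failure detected");
        else
                puts("old check: failure missed");

        /* Fixed check: a zero return means failure. */
        if (!pre_get(failing))
                puts("new check: failure detected");
        return 0;
}
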
index 304000c3d433f1d98afe6c0f0feea8553dc7bd04..a8fddeb3d638ebcdf9a0191beda816d8f1338ccd 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -294,18 +296,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
  */
 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
-       unsigned long flags = 0;
-
        atomic_set(&port->commands[tag].active, 1);
 
-       spin_lock_irqsave(&port->cmd_issue_lock, flags);
+       spin_lock(&port->cmd_issue_lock);
 
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->s_active[MTIP_TAG_INDEX(tag)]);
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 
-       spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+       spin_unlock(&port->cmd_issue_lock);
 
        /* Set the command's timeout value.*/
        port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +436,7 @@ static void mtip_init_port(struct mtip_port *port)
                writel(0xFFFFFFFF, port->completed[i]);
 
        /* Clear any pending interrupts for this port */
-       writel(readl(port->dd->mmio + PORT_IRQ_STAT),
-                                       port->dd->mmio + PORT_IRQ_STAT);
+       writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
 
        /* Clear any pending interrupts on the HBA. */
        writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +781,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
 
        /* Stop the timer to prevent command timeouts. */
        del_timer(&port->cmd_timer);
+       set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+       if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+                       test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+               cmd = &port->commands[MTIP_TAG_INTERNAL];
+               dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+               atomic_inc(&cmd->active); /* active > 1 indicates error */
+               if (cmd->comp_data && cmd->comp_func) {
+                       cmd->comp_func(port, MTIP_TAG_INTERNAL,
+                                       cmd->comp_data, PORT_IRQ_TF_ERR);
+               }
+               goto handle_tfe_exit;
+       }
 
        /* clear the tag accumulator */
        memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 
-       /* Set eh_active */
-       set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
        /* Loop through all the groups */
        for (group = 0; group < dd->slot_groups; group++) {
                completed = readl(port->completed[group]);
@@ -940,6 +950,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
        }
        print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
 
+handle_tfe_exit:
        /* clear eh_active */
        clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
        wake_up_interruptible(&port->svc_wait);
@@ -961,6 +972,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
        /* walk all bits in all slot groups */
        for (group = 0; group < dd->slot_groups; group++) {
                completed = readl(port->completed[group]);
+               if (!completed)
+                       continue;
 
                /* clear completed status register in the hardware.*/
                writel(completed, port->completed[group]);
@@ -1329,22 +1342,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
                        }
                        rv = -EAGAIN;
                }
-
-               if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
-                       & (1 << MTIP_TAG_INTERNAL)) {
-                       dev_warn(&port->dd->pdev->dev,
-                               "Retiring internal command but CI is 1.\n");
-                       if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-                                               &port->dd->dd_flag)) {
-                               hba_reset_nosleep(port->dd);
-                               rv = -ENXIO;
-                       } else {
-                               mtip_restart_port(port);
-                               rv = -EAGAIN;
-                       }
-                       goto exec_ic_exit;
-               }
-
        } else {
                /* Spin for <timeout> checking if command still outstanding */
                timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1358,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
                                rv = -ENXIO;
                                goto exec_ic_exit;
                        }
+                       if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+                               atomic_inc(&int_cmd->active); /* error */
+                               break;
+                       }
                }
+       }
 
-               if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+       if (atomic_read(&int_cmd->active) > 1) {
+               dev_err(&port->dd->pdev->dev,
+                       "Internal command [%02X] failed\n", fis->command);
+               rv = -EIO;
+       }
+       if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
                        & (1 << MTIP_TAG_INTERNAL)) {
-                       dev_err(&port->dd->pdev->dev,
-                               "Internal command did not complete [atomic]\n");
+               rv = -ENXIO;
+               if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+                                       &port->dd->dd_flag)) {
+                       mtip_restart_port(port);
                        rv = -EAGAIN;
-                       if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-                                               &port->dd->dd_flag)) {
-                               hba_reset_nosleep(port->dd);
-                               rv = -ENXIO;
-                       } else {
-                               mtip_restart_port(port);
-                               rv = -EAGAIN;
-                       }
                }
        }
 exec_ic_exit:
@@ -1893,13 +1894,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                                void __user *user_buffer)
 {
        struct host_to_dev_fis  fis;
-       struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+       struct host_to_dev_fis *reply;
+       u8 *buf = NULL;
+       dma_addr_t dma_addr = 0;
+       int rv = 0, xfer_sz = command[3];
+
+       if (xfer_sz) {
+               if (!user_buffer)
+                       return -EFAULT;
+
+               buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+                               ATA_SECT_SIZE * xfer_sz,
+                               &dma_addr,
+                               GFP_KERNEL);
+               if (!buf) {
+                       dev_err(&port->dd->pdev->dev,
+                               "Memory allocation failed (%d bytes)\n",
+                               ATA_SECT_SIZE * xfer_sz);
+                       return -ENOMEM;
+               }
+               memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+       }
 
        /* Build the FIS. */
        memset(&fis, 0, sizeof(struct host_to_dev_fis));
-       fis.type                = 0x27;
-       fis.opts                = 1 << 7;
-       fis.command             = command[0];
+       fis.type        = 0x27;
+       fis.opts        = 1 << 7;
+       fis.command     = command[0];
        fis.features    = command[2];
        fis.sect_count  = command[3];
        if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1929,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                fis.cyl_hi      = 0xC2;
        }
 
+       if (xfer_sz)
+               reply = (port->rxfis + RX_FIS_PIO_SETUP);
+       else
+               reply = (port->rxfis + RX_FIS_D2H_REG);
+
        dbg_printk(MTIP_DRV_NAME
                " %s: User Command: cmd %x, sect %x, "
                "feat %x, sectcnt %x\n",
@@ -1917,43 +1943,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                command[2],
                command[3]);
 
-       memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
        /* Execute the command. */
        if (mtip_exec_internal_command(port,
                                &fis,
                                 5,
-                                port->sector_buffer_dma,
-                                (command[3] != 0) ? ATA_SECT_SIZE : 0,
+                                (xfer_sz ? dma_addr : 0),
+                                (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
                                 0,
                                 GFP_KERNEL,
                                 MTIP_IOCTL_COMMAND_TIMEOUT_MS)
                                 < 0) {
-               return -1;
+               rv = -EFAULT;
+               goto exit_drive_command;
        }
 
        /* Collect the completion status. */
        command[0] = reply->command; /* Status*/
        command[1] = reply->features; /* Error*/
-       command[2] = command[3];
+       command[2] = reply->sect_count;
 
        dbg_printk(MTIP_DRV_NAME
                " %s: Completion Status: stat %x, "
-               "err %x, cmd %x\n",
+               "err %x, nsect %x\n",
                __func__,
                command[0],
                command[1],
                command[2]);
 
-       if (user_buffer && command[3]) {
+       if (xfer_sz) {
                if (copy_to_user(user_buffer,
-                                port->sector_buffer,
+                                buf,
                                 ATA_SECT_SIZE * command[3])) {
-                       return -EFAULT;
+                       rv = -EFAULT;
+                       goto exit_drive_command;
                }
        }
-
-       return 0;
+exit_drive_command:
+       if (buf)
+               dmam_free_coherent(&port->dd->pdev->dev,
+                               ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+       return rv;
 }
 
 /*
@@ -2003,6 +2032,32 @@ static unsigned int implicit_sector(unsigned char command,
        return rv;
 }
 
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+       switch (fis->command) {
+       case ATA_CMD_DOWNLOAD_MICRO:
+               *timeout = 120000; /* 2 minutes */
+               break;
+       case ATA_CMD_SEC_ERASE_UNIT:
+       case 0xFC:
+               *timeout = 240000; /* 4 minutes */
+               break;
+       case ATA_CMD_STANDBYNOW1:
+               *timeout = 10000;  /* 10 seconds */
+               break;
+       case 0xF7:
+       case 0xFA:
+               *timeout = 60000;  /* 60 seconds */
+               break;
+       case ATA_CMD_SMART:
+               *timeout = 15000;  /* 15 seconds */
+               break;
+       default:
+               *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+               break;
+       }
+}
+
 /*
  * Executes a taskfile
  * See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2078,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
        unsigned int taskin = 0;
        unsigned int taskout = 0;
        u8 nsect = 0;
-       unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+       unsigned int timeout;
        unsigned int force_single_sector;
        unsigned int transfer_size;
        unsigned long task_file_data;
@@ -2153,32 +2208,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
                fis.lba_hi,
                fis.device);
 
-       switch (fis.command) {
-       case ATA_CMD_DOWNLOAD_MICRO:
-               /* Change timeout for Download Microcode to 2 minutes */
-               timeout = 120000;
-               break;
-       case ATA_CMD_SEC_ERASE_UNIT:
-               /* Change timeout for Security Erase Unit to 4 minutes.*/
-               timeout = 240000;
-               break;
-       case ATA_CMD_STANDBYNOW1:
-               /* Change timeout for standby immediate to 10 seconds.*/
-               timeout = 10000;
-               break;
-       case 0xF7:
-       case 0xFA:
-               /* Change timeout for vendor unique command to 10 secs */
-               timeout = 10000;
-               break;
-       case ATA_CMD_SMART:
-               /* Change timeout for vendor unique command to 15 secs */
-               timeout = 15000;
-               break;
-       default:
-               timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
-               break;
-       }
+       mtip_set_timeout(&fis, &timeout);
 
        /* Determine the correct transfer size.*/
        if (force_single_sector)
@@ -2295,13 +2325,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
 {
        switch (cmd) {
        case HDIO_GET_IDENTITY:
-               if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
-                       dev_warn(&dd->pdev->dev,
-                               "Unable to read identity\n");
-                       return -EIO;
-               }
-
+       {
+               if (copy_to_user((void __user *)arg, dd->port->identify,
+                                               sizeof(u16) * ATA_ID_WORDS))
+                       return -EFAULT;
                break;
+       }
        case HDIO_DRIVE_CMD:
        {
                u8 drive_command[4];
@@ -2519,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2528,72 +2557,138 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "S ACTive:\n");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
-       size += sprintf(&buf[size], "Command Issue:\n");
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
-       size += sprintf(&buf[size], "Allocated:\n");
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
+
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
+                               readl(dd->port->completed[n]));
 
-       for (n = 0; n < dd->slot_groups; n++) {
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
+                               readl(dd->port->mmio + PORT_IRQ_STAT));
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
+                               readl(dd->mmio + HOST_IRQ_STAT));
+       size += sprintf(&buf[size], "\n");
+
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
+
+       for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
                        group_allocated =
                                dd->port->allocated[n/2] >> (32*(n&1));
                else
                        group_allocated = dd->port->allocated[n];
-               size += sprintf(&buf[size], "0x%08x\n",
-                                group_allocated);
+               size += sprintf(&buf[size], "%08X ", group_allocated);
        }
+       size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Completed:\n");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
-                               readl(dd->port->completed[n]));
+       for (n = dd->slot_groups-1; n >= 0; n--) {
+               if (sizeof(long) > sizeof(u32))
+                       group_allocated =
+                               dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+               else
+                       group_allocated = dd->port->cmds_to_issue[n];
+               size += sprintf(&buf[size], "%08X ", group_allocated);
+       }
+       size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
-                               readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
-                               readl(dd->mmio + HOST_IRQ_STAT));
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
 
-       return size;
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
+       if (!len || size)
+               return 0;
 
-       return size;
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
+                                                       dd->port->flags);
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
+                                                       dd->dd_flag);
+
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2610,9 +2705,6 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
@@ -2634,12 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3634,7 +3753,10 @@ skip_create_disk:
        set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
        blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
        blk_queue_physical_block_size(dd->queue, 4096);
+       blk_queue_max_hw_sectors(dd->queue, 0xffff);
+       blk_queue_max_segment_size(dd->queue, 0x400000);
        blk_queue_io_min(dd->queue, 4096);
+
        /*
         * write back cache is not supported in the device. FUA depends on
         * write back cache support, hence setting flush support to zero.
@@ -3662,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3687,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3737,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4084,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4104,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
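
For reference, the mtip32xx changes above move the register dump from the old 'registers' sysfs attribute to per-device debugfs files ("flags" and "registers") under a shared "rssd" directory. A minimal sketch of that read-only debugfs pattern is below; the names are illustrative, not the driver's own:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/err.h>
	#include <linux/uaccess.h>

	static struct dentry *demo_dir;

	static ssize_t demo_read(struct file *f, char __user *ubuf,
				 size_t len, loff_t *offset)
	{
		char buf[64];
		int size;

		if (*offset)			/* one-shot read, as in the driver */
			return 0;

		size = scnprintf(buf, sizeof(buf), "Flag-demo : [ %08lX ]\n", 0UL);
		size = min_t(size_t, size, len);
		if (copy_to_user(ubuf, buf, size))
			return -EFAULT;

		*offset = size;
		return size;
	}

	static const struct file_operations demo_fops = {
		.owner	= THIS_MODULE,
		.open	= simple_open,
		.read	= demo_read,
		.llseek	= no_llseek,
	};

	static void demo_debugfs_init(void)	/* typically called from probe() */
	{
		demo_dir = debugfs_create_dir("demo", NULL);
		if (!IS_ERR_OR_NULL(demo_dir))
			debugfs_create_file("flags", S_IRUGO, demo_dir,
					    NULL, &demo_fops);
	}

	static void demo_debugfs_exit(void)	/* typically called from remove() */
	{
		debugfs_remove_recursive(demo_dir);
	}
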
 
index 4ef58336310a126af9b4d0847dc4099e4af23610..f51fc23d17bb0e0025c74ac9f495007dcbc8b784 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in pci configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT          0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT          1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT          2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT          3 /* download microcde */
-#define MTIP_PF_PAUSE_IO       ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+       /* below are bit numbers in 'flags' defined in mtip_port */
+       MTIP_PF_IC_ACTIVE_BIT       = 0, /* pio/ioctl */
+       MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
+       MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
+       MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcode */
+       MTIP_PF_PAUSE_IO      = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
                                (1 << MTIP_PF_EH_ACTIVE_BIT) | \
                                (1 << MTIP_PF_SE_ACTIVE_BIT) | \
-                               (1 << MTIP_PF_DM_ACTIVE_BIT))
-
-#define MTIP_PF_SVC_THD_ACTIVE_BIT     4
-#define MTIP_PF_ISSUE_CMDS_BIT         5
-#define MTIP_PF_REBUILD_BIT            6
-#define MTIP_PF_SVC_THD_STOP_BIT       8
-
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT    1
-#define MTIP_DDF_OVER_TEMP_BIT         2
-#define MTIP_DDF_WRITE_PROTECT_BIT     3
-#define MTIP_DDF_STOP_IO       ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+                               (1 << MTIP_PF_DM_ACTIVE_BIT)),
+
+       MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
+       MTIP_PF_ISSUE_CMDS_BIT      = 5,
+       MTIP_PF_REBUILD_BIT         = 6,
+       MTIP_PF_SVC_THD_STOP_BIT    = 8,
+
+       /* below are bit numbers in 'dd_flag' defined in driver_data */
+       MTIP_DDF_REMOVE_PENDING_BIT = 1,
+       MTIP_DDF_OVER_TEMP_BIT      = 2,
+       MTIP_DDF_WRITE_PROTECT_BIT  = 3,
+       MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
                                (1 << MTIP_DDF_OVER_TEMP_BIT) | \
-                               (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+                               (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
 
-#define MTIP_DDF_CLEANUP_BIT           5
-#define MTIP_DDF_RESUME_BIT            6
-#define MTIP_DDF_INIT_DONE_BIT         7
-#define MTIP_DDF_REBUILD_FAILED_BIT    8
+       MTIP_DDF_CLEANUP_BIT        = 5,
+       MTIP_DDF_RESUME_BIT         = 6,
+       MTIP_DDF_INIT_DONE_BIT      = 7,
+       MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
 
 __packed struct smart_attr{
        u8 attr_id;
@@ -445,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
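
The flag bits above are driven with the kernel's atomic bitops on the port 'flags' and device 'dd_flag' words, as the .c hunks show. A small illustration of that usage, assuming this header is included and with the surrounding structures elided:

	#include <linux/bitops.h>
	#include <linux/printk.h>

	/* illustrative only: touch one port flag and test one device flag */
	static void demo_flag_usage(unsigned long *port_flags, unsigned long *dd_flag)
	{
		set_bit(MTIP_PF_EH_ACTIVE_BIT, port_flags);	/* enter error handling */

		if (test_bit(MTIP_DDF_OVER_TEMP_BIT, dd_flag))
			pr_warn("device reported thermal shutdown\n");

		clear_bit(MTIP_PF_EH_ACTIVE_BIT, port_flags);	/* leave error handling */
	}
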
index 061427a75d375a5ed0655bdac8422e668a252a5e..8957b9f0cfaddee18e9b8a47ac50d22db4bfb7c2 100644 (file)
@@ -481,7 +481,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
                nbd_end_request(req);
        } else {
                spin_lock(&nbd->queue_lock);
-               list_add(&req->queuelist, &nbd->queue_head);
+               list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }
 
index 013c7a549fb6dbc3d5d1afe2e01730e951846507..8f428a8ab003d8c7a029036eea6a1666a96d7dd3 100644 (file)
@@ -141,7 +141,7 @@ struct rbd_request {
 struct rbd_snap {
        struct  device          dev;
        const char              *name;
-       size_t                  size;
+       u64                     size;
        struct list_head        node;
        u64                     id;
 };
@@ -175,8 +175,7 @@ struct rbd_device {
        /* protects updating the header */
        struct rw_semaphore     header_rwsem;
        char                    snap_name[RBD_MAX_SNAP_NAME_LEN];
-       u32 cur_snap;   /* index+1 of current snapshot within snap context
-                          0 - for the head */
+       u64                     snap_id;        /* current snapshot id */
        int read_only;
 
        struct list_head        node;
@@ -241,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
        put_device(&rbd_dev->dev);
 }
 
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -450,7 +449,9 @@ static void rbd_client_release(struct kref *kref)
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
        dout("rbd_release_client %p\n", rbdc);
+       spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
+       spin_unlock(&rbd_client_list_lock);
 
        ceph_destroy_client(rbdc->client);
        kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@ static void rbd_client_release(struct kref *kref)
  */
 static void rbd_put_client(struct rbd_device *rbd_dev)
 {
-       spin_lock(&rbd_client_list_lock);
        kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
-       spin_unlock(&rbd_client_list_lock);
        rbd_dev->rbd_client = NULL;
 }
 
@@ -487,18 +486,20 @@ static void rbd_coll_release(struct kref *kref)
  */
 static int rbd_header_from_disk(struct rbd_image_header *header,
                                 struct rbd_image_header_ondisk *ondisk,
-                                int allocated_snaps,
+                                u32 allocated_snaps,
                                 gfp_t gfp_flags)
 {
-       int i;
-       u32 snap_count;
+       u32 i, snap_count;
 
        if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
                return -ENXIO;
 
        snap_count = le32_to_cpu(ondisk->snap_count);
+       if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+                        / sizeof (*ondisk))
+               return -EINVAL;
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
-                               snap_count * sizeof (*ondisk),
+                               snap_count * sizeof(u64),
                                gfp_flags);
        if (!header->snapc)
                return -ENOMEM;
@@ -506,11 +507,11 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        if (snap_count) {
                header->snap_names = kmalloc(header->snap_names_len,
-                                            GFP_KERNEL);
+                                            gfp_flags);
                if (!header->snap_names)
                        goto err_snapc;
                header->snap_sizes = kmalloc(snap_count * sizeof(u64),
-                                            GFP_KERNEL);
+                                            gfp_flags);
                if (!header->snap_sizes)
                        goto err_names;
        } else {
@@ -552,21 +553,6 @@ err_snapc:
        return -ENOMEM;
 }
 
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
-       return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
-       struct rbd_image_header *header = &rbd_dev->header;
-
-       if (!rbd_dev->cur_snap)
-               return 0;
-
-       return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
 static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
                        u64 *seq, u64 *size)
 {
@@ -605,7 +591,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
                        snapc->seq = header->snap_seq;
                else
                        snapc->seq = 0;
-               dev->cur_snap = 0;
+               dev->snap_id = CEPH_NOSNAP;
                dev->read_only = 0;
                if (size)
                        *size = header->image_size;
@@ -613,8 +599,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
                ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
                if (ret < 0)
                        goto done;
-
-               dev->cur_snap = header->total_snaps - ret;
+               dev->snap_id = snapc->seq;
                dev->read_only = 1;
        }
 
@@ -935,7 +920,6 @@ static int rbd_do_request(struct request *rq,
        layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
        layout->fl_stripe_count = cpu_to_le32(1);
        layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
-       layout->fl_pg_preferred = cpu_to_le32(-1);
        layout->fl_pg_pool = cpu_to_le32(dev->poolid);
        ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
                                req, ops);
@@ -993,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
        op = (void *)(replyhead + 1);
        rc = le32_to_cpu(replyhead->result);
        bytes = le64_to_cpu(op->extent.length);
-       read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+       read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
 
        dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
 
@@ -1168,7 +1152,7 @@ static int rbd_req_read(struct request *rq,
                         int coll_index)
 {
        return rbd_do_op(rq, rbd_dev, NULL,
-                        (snapid ? snapid : CEPH_NOSNAP),
+                        snapid,
                         CEPH_OSD_OP_READ,
                         CEPH_OSD_FLAG_READ,
                         2,
@@ -1187,7 +1171,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
                          u64 *ver)
 {
        return rbd_req_sync_op(dev, NULL,
-                              (snapid ? snapid : CEPH_NOSNAP),
+                              snapid,
                               CEPH_OSD_OP_READ,
                               CEPH_OSD_FLAG_READ,
                               NULL,
@@ -1238,7 +1222,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
                notify_id, (int)opcode);
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-       rc = __rbd_update_snaps(dev);
+       rc = __rbd_refresh_header(dev);
        mutex_unlock(&ctl_mutex);
        if (rc)
                pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@ static void rbd_rq_fn(struct request_queue *q)
                                              coll, cur_seg);
                        else
                                rbd_req_read(rq, rbd_dev,
-                                            cur_snap_id(rbd_dev),
+                                            rbd_dev->snap_id,
                                             ofs,
                                             op_size, bio,
                                             coll, cur_seg);
@@ -1592,7 +1576,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 {
        ssize_t rc;
        struct rbd_image_header_ondisk *dh;
-       int snap_count = 0;
+       u32 snap_count = 0;
        u64 ver;
        size_t len;
 
@@ -1656,7 +1640,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
        struct ceph_mon_client *monc;
 
        /* we should create a snapshot only if we're pointing at the head */
-       if (dev->cur_snap)
+       if (dev->snap_id != CEPH_NOSNAP)
                return -EINVAL;
 
        monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
        if (ret < 0)
                return ret;
 
-       dev->header.snapc->seq =  new_snapid;
+       down_write(&dev->header_rwsem);
+       dev->header.snapc->seq = new_snapid;
+       up_write(&dev->header_rwsem);
 
        return 0;
 bad:
@@ -1703,7 +1689,7 @@ static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
 /*
  * only read the first part of the ondisk header, without the snaps info
  */
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
 {
        int ret;
        struct rbd_image_header h;
@@ -1890,7 +1876,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
 
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-       rc = __rbd_update_snaps(rbd_dev);
+       rc = __rbd_refresh_header(rbd_dev);
        if (rc < 0)
                ret = rc;
 
@@ -1949,7 +1935,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
 {
        struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-       return sprintf(buf, "%zd\n", snap->size);
+       return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
 }
 
 static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
 {
        struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-       return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+       return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
 }
 
 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
                                         rbd_dev->header.obj_version);
                if (ret == -ERANGE) {
                        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-                       rc = __rbd_update_snaps(rbd_dev);
+                       rc = __rbd_refresh_header(rbd_dev);
                        mutex_unlock(&ctl_mutex);
                        if (rc < 0)
                                return rc;
@@ -2558,7 +2544,7 @@ static ssize_t rbd_snap_add(struct device *dev,
        if (ret < 0)
                goto err_unlock;
 
-       ret = __rbd_update_snaps(rbd_dev);
+       ret = __rbd_refresh_header(rbd_dev);
        if (ret < 0)
                goto err_unlock;
 
index aa2712060bfbcd153e446f5adcf80a9d61377e75..eb0d8216f557434b36e6fbc809b75c33bbbc1292 100644 (file)
@@ -513,6 +513,21 @@ static void process_page(unsigned long data)
        }
 }
 
+static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+       struct cardinfo *card = cb->data;
+
+       spin_lock_irq(&card->lock);
+       activate(card);
+       spin_unlock_irq(&card->lock);
+       kfree(cb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +538,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
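
The umem change above uses blk_check_plugged() so queued bios are only kicked when the submitting task unplugs (or immediately for REQ_SYNC). A minimal sketch of that plug-callback pattern, with a hypothetical my_dev standing in for cardinfo:

	#include <linux/blkdev.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_dev {
		spinlock_t lock;
		/* ... queue of pending bios ... */
	};

	static void my_kick(struct my_dev *dev)
	{
		/* stub: start the hardware on whatever is queued */
	}

	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		struct my_dev *dev = cb->data;

		spin_lock_irq(&dev->lock);
		my_kick(dev);
		spin_unlock_irq(&dev->lock);
		kfree(cb);			/* the callback owns the blk_plug_cb */
	}

	/* non-zero if a plug is active and our callback is registered on it */
	static int my_check_plugged(struct my_dev *dev)
	{
		return !!blk_check_plugged(my_unplug, dev, sizeof(struct blk_plug_cb));
	}
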
index 773cf27dc23fc595d6fca48ea8ec6c4992438d18..9ad3b5ec1dc1c521085db47a7928cc8cf1179701 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4bdd2e4528ae3c3b8871cd65f85d3c34952..e4fb3374dcd2aaa6d0f834cd9394564a3c022a38 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE;
+                * see get_id_from_freelist()).
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
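
op_name() in the blkfront hunk above is the usual defensive idiom for decoding a peer-supplied opcode: a designated-initializer string table guarded by bounds and NULL checks. A standalone sketch of the idiom with made-up opcodes:

	#include <linux/kernel.h>

	enum demo_op { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1, DEMO_OP_TRIM = 5 };

	static const char *demo_op_name(int op)
	{
		static const char *const names[] = {
			[DEMO_OP_READ]	= "read",
			[DEMO_OP_WRITE]	= "write",
			[DEMO_OP_TRIM]	= "trim",
		};

		if (op < 0 || op >= ARRAY_SIZE(names))
			return "unknown";	/* never index outside the table */
		if (!names[op])
			return "reserved";	/* hole left by the initializers */
		return names[op];
	}
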
index ad591bd240ec3d5fed2fd2444a4d26313c76eb12..10308cd8a7ed2276f146c86c752383de5517464a 100644 (file)
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
+       { USB_DEVICE(0x0CF3, 0xE019) },
        { USB_DEVICE(0x13d3, 0x3304) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,7 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
+       { USB_DEVICE(0x0930, 0x0219) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +103,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
index 94f2d65131c441d213365936e14e513e3248097d..27068d1493808ec3f2acda115fd51773cbc4cfc5 100644 (file)
@@ -136,7 +136,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
 
 void btmrvl_interrupt(struct btmrvl_private *priv);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
index 681ca9d18e125e39cbec92071560ce3d0da4269a..dc304def8400ca4d38c67c989d1560cea7711303 100644 (file)
@@ -44,23 +44,33 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
 }
 EXPORT_SYMBOL_GPL(btmrvl_interrupt);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
 {
        struct hci_event_hdr *hdr = (void *) skb->data;
        struct hci_ev_cmd_complete *ec;
-       u16 opcode, ocf;
+       u16 opcode, ocf, ogf;
 
        if (hdr->evt == HCI_EV_CMD_COMPLETE) {
                ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
                opcode = __le16_to_cpu(ec->opcode);
                ocf = hci_opcode_ocf(opcode);
+               ogf = hci_opcode_ogf(opcode);
+
                if (ocf == BT_CMD_MODULE_CFG_REQ &&
                                        priv->btmrvl_dev.sendcmdflag) {
                        priv->btmrvl_dev.sendcmdflag = false;
                        priv->adapter->cmd_complete = true;
                        wake_up_interruptible(&priv->adapter->cmd_wait_q);
                }
+
+               if (ogf == OGF) {
+                       BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+                       kfree_skb(skb);
+                       return false;
+               }
        }
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
 
index a853244e7fd7b59b1689c0281a1d44534b1e708c..0cd61d9f07cdb1d1d70c8dfc0d2e52c239903402 100644 (file)
@@ -562,10 +562,12 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
                skb_put(skb, buf_len);
                skb_pull(skb, SDIO_HEADER_LEN);
 
-               if (type == HCI_EVENT_PKT)
-                       btmrvl_check_evtpkt(priv, skb);
+               if (type == HCI_EVENT_PKT) {
+                       if (btmrvl_check_evtpkt(priv, skb))
+                               hci_recv_frame(skb);
+               } else
+                       hci_recv_frame(skb);
 
-               hci_recv_frame(skb);
                hdev->stat.byte_rx += buf_len;
                break;
 
index c9463af8e564e8707bab12374e5324b0739d5994..83ebb241bfcc8bbd1f279875141f58325bbd09c3 100644 (file)
@@ -125,6 +125,7 @@ static struct usb_device_id blacklist_table[] = {
 
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +140,7 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
index 764f70c5e690259dea53ea87e6ffa72946b8e7a3..0a41852794177636f62c91010430d85891f1425b 100644 (file)
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_B43_HB),
        ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+       ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
index c0091753a0d191c0268f8093f3ccc89fc4dd3280..8e2d9140f300291ae506741d1c7bb4135b728599 100644 (file)
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB          0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB         0x0069
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG          0x0042
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB          0x0044
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
index f518b99f53f5b995a3c8f9cd5d8ff616a62d3855..731c9046cf7bf0dbd06d26be41ea1e1b25b87ac8 100644 (file)
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
        u32 *data = buf;
 
        /* data ready? */
-       if (readl(trng->base + TRNG_ODATA) & 1) {
+       if (readl(trng->base + TRNG_ISR) & 1) {
                *data = readl(trng->base + TRNG_ODATA);
+               /*
+                * Ensure "data ready" is only set again AFTER the next data
+                * word is ready, in case it got set between checking ISR
+                * and reading ODATA, so we don't risk re-reading the
+                * same word.
+                */
+               readl(trng->base + TRNG_ISR);
                return 4;
        } else
                return 0;
index 687b00d67c8a77a88ad4e135605c1ed11a319efb..9a1eb0cfa95f3f0a00a7334f3b276bf623d88cf6 100644 (file)
@@ -850,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
        unsigned long old_rate;
+       unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;
 
        old_rate = clk->rate;
 
+       if (clk->parent)
+               best_parent_rate = clk->parent->rate;
+
        if (clk->ops->set_rate)
-               clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
+               clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
 
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               clk->parent->rate);
+               clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
-               clk->rate = clk->parent->rate;
+               clk->rate = best_parent_rate;
 
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (!clk->parents)
                clk->parents =
-                       kmalloc((sizeof(struct clk*) * clk->num_parents),
+                       kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);
 
        if (!clk->parents)
@@ -1064,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       for (i = 0; i < clk->num_parents; i++)
-               if (clk->parents[i] == parent)
-                       break;
+       if (!clk->parents)
+               clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+                                                               GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
index f7be225f544cb36c4c733583dfb6d168f53d9a21..db2391c054ee0a51805368d0a369267635375cf1 100644 (file)
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
        __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -80,31 +80,31 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80070000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx23-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx23-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp_lookups[] __initdata = {
+static struct clk_lookup ssp_lookups[] = {
        { .dev_id = "imx23-mmc.0", },
        { .dev_id = "imx23-mmc.1", },
        { .dev_id = "80010000.ssp", },
        { .dev_id = "80034000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx23-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx23-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
index 2826a2606a29fca4f1474ce76fe87fc88fb968ea..7fad6c8c13d222fc9a528191835d141d06ffcec5 100644 (file)
@@ -120,7 +120,7 @@ static void __init clk_misc_init(void)
        writel_relaxed(val, FRAC0);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80074000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx28-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx28-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp0_lookups[] __initdata = {
+static struct clk_lookup ssp0_lookups[] = {
        { .dev_id = "imx28-mmc.0", },
        { .dev_id = "80010000.ssp", },
 };
 
-static struct clk_lookup ssp1_lookups[] __initdata = {
+static struct clk_lookup ssp1_lookups[] = {
        { .dev_id = "imx28-mmc.1", },
        { .dev_id = "80012000.ssp", },
 };
 
-static struct clk_lookup ssp2_lookups[] __initdata = {
+static struct clk_lookup ssp2_lookups[] = {
        { .dev_id = "imx28-mmc.2", },
        { .dev_id = "80014000.ssp", },
 };
 
-static struct clk_lookup ssp3_lookups[] __initdata = {
+static struct clk_lookup ssp3_lookups[] = {
        { .dev_id = "imx28-mmc.3", },
        { .dev_id = "80016000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx28-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx28-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
 
-static struct clk_lookup fec_lookups[] __initdata = {
+static struct clk_lookup fec_lookups[] = {
        { .dev_id = "imx28-fec.0", },
        { .dev_id = "imx28-fec.1", },
        { .dev_id = "800f0000.ethernet", },
        { .dev_id = "800f4000.ethernet", },
 };
 
-static struct clk_lookup can0_lookups[] __initdata = {
+static struct clk_lookup can0_lookups[] = {
        { .dev_id = "flexcan.0", },
        { .dev_id = "80032000.can", },
 };
 
-static struct clk_lookup can1_lookups[] __initdata = {
+static struct clk_lookup can1_lookups[] = {
        { .dev_id = "flexcan.1", },
        { .dev_id = "80034000.can", },
 };
 
-static struct clk_lookup saif0_lookups[] __initdata = {
+static struct clk_lookup saif0_lookups[] = {
        { .dev_id = "mxs-saif.0", },
        { .dev_id = "80042000.saif", },
 };
 
-static struct clk_lookup saif1_lookups[] __initdata = {
+static struct clk_lookup saif1_lookups[] = {
        { .dev_id = "mxs-saif.1", },
        { .dev_id = "80046000.saif", },
 };
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
        clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
        clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
        clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
-       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
-       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
        clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
        clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
        clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
index af34074e702b8930167c7291b0aa9ae041c93dcb..6756e7c3bc07d496f9136e98f15ce46dad0bc82a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4dbdb3fe18e02bd042867a3ae7c72aa77fbaa017..958aa3ad1d6023bbe39a70c594b8e45099ac1a8d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b471c9762a9763471ebd8cefb1d64ee164a894d5..1afc18c4effcc150f6bda6dede54e134e9ab4d68 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index dcd4bdf4b0d99f796747d47559d8eedbc1fdb1c3..5f1b6badeb15be0a8fa8a9285f90c0bfe9c9b55f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 376d4e5ff32609985e53f6fdc7f6b3d63331e895..7cd63788d546d885e70ef2b543c3f1ef93dffe43 100644
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 3321c46a071c5641169e94e34e01e15602a93f04..931737677dfab553032f8a720893c719c4f0a639 100644
@@ -2,7 +2,7 @@
  * Clock framework definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 42b68df9aeef20f2857373a06f235fb58de8f5e6..0fcec2aae19cc032dfbfb8475319044420a22501 100644
@@ -4,7 +4,7 @@
  * SPEAr1310 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
-static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
 static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll2_clk", };
 static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
-       "ras_pll2_clk", "ras_synth0_clk", };
+       "ras_pll2_clk", "ras_syn0_clk", };
 static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
-       "ras_pll2_clk", "ras_synth0_clk", };
-static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
-static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
-static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+       "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
        "ras_plclk0_clk", };
-static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
-static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
 
 void __init spear1310_clk_init(void)
 {
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
                        0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
                        0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
                        0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1310_GMAC_CLK_CFG,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
                        SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
                        "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
                        &i2s_sclk_masks, i2s_sclk_rtbl,
                        ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
        clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "ras_pll3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c700000.eth");
 
-       clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+       clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
                        smii_rgmii_phy_parents,
                        ARRAY_SIZE(smii_rgmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "stmmacphy.2");
        clk_register_clkdev(clk, NULL, "stmmacphy.4");
 
-       clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+       clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
                        ARRAY_SIZE(rmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
                        SPEAR1310_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.3");
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c800000.serial");
 
-       clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c900000.serial");
 
-       clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ca00000.serial");
 
-       clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cb00000.serial");
 
-       clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cc00000.serial");
 
-       clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cd00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ce00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cf00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d000000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d100000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c6_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d200000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c7_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d300000.i2c");
 
-       clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+       clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
                        ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+       clk_register_clkdev(clk, "ssp1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d400000.spi");
 
-       clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+       clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
                        ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "pci_mux_clk", NULL);
+       clk_register_clkdev(clk, "pci_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pci");
 
-       clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
 
-       clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
index f130919d5bf86b2391f490eaf65c6d9f574c97f1..2352cee7f6455ed95c8b22e6d91824bad15fd1e4 100644
@@ -4,7 +4,7 @@
  * SPEAr1340 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
 
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
-static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
-       "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
-static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+       "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
 static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart0_synth_gate_clk", };
+       "uart0_syn_gclk", };
 static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart1_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+       "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
-static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
-};
-static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
 
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
-                       0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+                       SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
-                       0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+                       SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
-                       0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+                       SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
        clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        /* clock derived from pll1 clk */
-       clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
                        ARRAY_SIZE(sys_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "sys_synth_clk", NULL);
+       clk_register_clkdev(clk, "sys_syn_clk", NULL);
 
-       clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
                        ARRAY_SIZE(amba_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "amba_synth_clk", NULL);
+       clk_register_clkdev(clk, "amba_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+       clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
                        ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
                        SPEAR1340_SCLK_SRC_SEL_SHIFT,
                        SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
        clk_register_clkdev(clk, "sys_clk", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
                        2);
        clk_register_clkdev(clk, "cpu_clk", NULL);
 
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+       clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+       clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
                        ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4100000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+                       SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1340_GMAC_CLK_CFG,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
                        SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
-                       "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
-                       &i2s_sclk_masks, i2s_sclk_rtbl,
-                       ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+                       0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+                       i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+                       &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0280000.i2c");
 
        clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4000000.i2c");
 
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+       clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_cec.1");
 
-       clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+       clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
                        ARRAY_SIZE(spdif_out_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_out_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "spdif-out");
 
-       clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+       clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
                        ARRAY_SIZE(spdif_in_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_in_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spdif-in");
 
-       clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+       clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "acp_clk");
 
-       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "plgpio");
 
-       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_dec");
 
-       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_enc");
 
-       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_vip");
 
-       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.0");
 
-       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.1");
 
-       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.2");
 
-       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.3");
 
-       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pwm");
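
The SPEAr1340 hunks above are almost entirely a rename (the long "_synth_", "_mux_clk" and "_gate_clk" suffixes become "_syn_", "_mclk" and "_gclk"), plus one functional fix: the I2C1 gate is now enabled through SPEAR1340_PERIP3_CLK_ENB rather than PERIP1. The mux-then-gate registration pattern itself is unchanged; the sketch below only shows its shape, with made-up clock names, register pointers and lock rather than the real SPEAr definitions:

    #include <linux/clk-provider.h>
    #include <linux/clkdev.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static const char *demo_parents[] = { "pll1_clk", "pll2_clk", };

    /* mirrors the mux -> gate -> clkdev chain used throughout the hunk above */
    static void __init demo_clk_init(void __iomem *cfg_reg, void __iomem *enb_reg)
    {
            struct clk *clk;

            clk = clk_register_mux(NULL, "demo_mclk", demo_parents,
                            ARRAY_SIZE(demo_parents), 0, cfg_reg,
                            0 /* shift */, 0x1 /* mask/width */, 0, &demo_lock);
            clk_register_clkdev(clk, "demo_mclk", NULL);

            clk = clk_register_gate(NULL, "demo_clk", "demo_mclk", 0, enb_reg,
                            0 /* enable bit */, 0, &demo_lock);
            clk_register_clkdev(clk, NULL, "d0000000.demo");
    }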
index 440bb3e4c971262f04b7e5cad6ed875c903e48bc..c3157454bb3fc907bb787a7abc9ee5259cf7dce5 100644
@@ -2,7 +2,7 @@
  * SPEAr3xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
 };
 
 /* clock parents */
-static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
 };
-static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
-static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
 static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
 {
        struct clk *clk;
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "60000000.clcd");
 
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
        #define SPEAR320_UARTX_PCLK_VAL_SYNTH1          0x0
        #define SPEAR320_UARTX_PCLK_VAL_APB             0x1
 
-static const char *i2s_ref_parents[] = { "ras_pll2_clk",
-       "ras_gen2_synth_gate_clk", };
-static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
-       "ras_gen3_synth_gate_clk",
-};
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
 static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
-       "ras_gen0_synth_gate_clk", };
-static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
-};
+       "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
 
 static void __init spear320_clk_init(void)
 {
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
                        CLK_IS_ROOT, 125000000);
        clk_register_clkdev(clk, "smii_125m_pad", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "90000000.clcd");
 
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "fc900000.rtc");
 
        /* clock derived from 24 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
                        1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+                       UART_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+                       FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
        /* gpt clocks */
-       clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
        clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
                        ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+       clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
                        ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
        /* general synths clocks */
-       clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
-                       "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
-                       "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+       clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+                       0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+       clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+                       0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+       clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
                        ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
                        GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+       clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
 
-       clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen2_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
 
-       clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBH_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "usbh_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
                        1);
        clk_register_clkdev(clk, "usbh.1_clk", NULL);
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
                        RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "ras_pll2_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
                        RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
-                       "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT0_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
-                       "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT1_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
-                       "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT2_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
-                       "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT3_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
 
        if (of_machine_is_compatible("st,spear300"))
                spear300_clk_init();
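
The clkdev registrations above are what consumers look up at runtime, which is why the rename has to be applied consistently on both sides: an entry with a NULL con_id and a device name (for example "d0000000.serial") is matched by clk_get() on that device, while an entry with a con_id and a NULL device name is a global lookup by name. A rough illustration of both lookups from a hypothetical consumer driver (the "demo" function is invented):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int demo_grab_clocks(struct platform_device *pdev)
    {
            struct clk *devclk, *synclk;

            /* matches clk_register_clkdev(clk, NULL, "d0000000.serial")
             * when pdev's device is named "d0000000.serial" */
            devclk = clk_get(&pdev->dev, NULL);
            if (IS_ERR(devclk))
                    return PTR_ERR(devclk);

            /* matches clk_register_clkdev(clk, "uart_syn_clk", NULL),
             * a lookup purely by connection id */
            synclk = clk_get(NULL, "uart_syn_clk");
            if (IS_ERR(synclk)) {
                    clk_put(devclk);
                    return PTR_ERR(synclk);
            }
            clk_put(synclk);

            return clk_prepare_enable(devclk);
    }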
index f9a20b382304e9b7d0b209902ab648802cce499e..a98d0866f5416b4dab5d4073362f37c91825d873 100644
@@ -2,7 +2,7 @@
  * SPEAr6xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
        {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
 };
 
-static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
-};
-static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
-static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
 
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "rtc-spear");
 
        /* clock derived from 30 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
                        0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
-                       "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
-                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+                       0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+                       &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0080000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+                       0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
-       clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
-                       "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+       clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+                       0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+       clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
                        ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
                        CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
                        PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "clcd");
 
        /* gpt clocks */
-       clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+       clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+       clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+       clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
                        ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
                        GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.0_clk");
 
-       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.1_clk");
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "ahbmult2_clk", NULL);
 
        clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
-                       ARRAY_SIZE(ddr_parents),
-                       0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
-                       &_lock);
+                       ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+                       MCTR_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
@@ -298,7 +296,7 @@ void __init spear6xx_clk_init(void)
 
        clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        GMAC_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, NULL, "gmac");
+       clk_register_clkdev(clk, NULL, "e0800000.ethernet");
 
        clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        I2C_CLK_ENB, 0, &_lock);
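
Besides the same renames, the SPEAr6xx file also moves the gmac clock lookup from the legacy device name "gmac" to "e0800000.ethernet", matching the name the device gets when it is created from the device tree with its unit address. The fixed clocks in these hunks need no register access at all; a toy version of that fixed-rate plus fixed-factor arrangement, with invented names:

    #include <linux/clk-provider.h>
    #include <linux/clkdev.h>
    #include <linux/init.h>

    /* toy fixed-rate root plus a 1:1 fixed-factor child, shaped like
     * pll3_clk and the usbh.* clocks above; names are placeholders */
    static void __init demo_fixed_clk_init(void)
    {
            struct clk *clk;

            clk = clk_register_fixed_rate(NULL, "demo_48m_clk", NULL,
                            CLK_IS_ROOT, 48000000);
            clk_register_clkdev(clk, "demo_48m_clk", NULL);

            clk = clk_register_fixed_factor(NULL, "demo_child_clk",
                            "demo_48m_clk", 0, 1, 1);
            /* clk_get_rate() on the child now reports 48000000 as well */
            clk_register_clkdev(clk, "demo_child_clk", NULL);
    }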
index 8d81a1d32653d890094066461694ebd4dac81f2e..dd3e661a124d2ba9161a9872360495beef40cd76 100644
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)    += cs5535-clockevt.o
 obj-$(CONFIG_SH_TIMER_CMT)     += sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)    += sh_mtu2.o
 obj-$(CONFIG_SH_TIMER_TMU)     += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI)     += em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)     += i8253.o
 obj-$(CONFIG_CLKSRC_MMIO)      += mmio.o
 obj-$(CONFIG_DW_APB_TIMER)     += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 0000000..372051d
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ *  Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+       void __iomem *base;
+       struct clk *clk;
+       struct platform_device *pdev;
+       unsigned int active[USER_NR];
+       unsigned long rate;
+       raw_spinlock_t lock;
+       struct clock_event_device ced;
+       struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+       return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+                               unsigned long value)
+{
+       iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+       int ret;
+
+       /* enable clock */
+       ret = clk_enable(p->clk);
+       if (ret) {
+               dev_err(&p->pdev->dev, "cannot enable clock\n");
+               return ret;
+       }
+
+       /* configure channel, periodic mode and maximum timeout */
+       p->rate = clk_get_rate(p->clk);
+
+       /* reset the counter */
+       em_sti_write(p, STI_SET_H, 0x40000000);
+       em_sti_write(p, STI_SET_L, 0x00000000);
+
+       /* mask and clear pending interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+       em_sti_write(p, STI_INTFFCLR, 3);
+
+       /* enable updates of counter registers */
+       em_sti_write(p, STI_CONTROL, 1);
+
+       return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+       /* mask interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+
+       /* stop clock */
+       clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+       cycle_t ticks;
+       unsigned long flags;
+
+       /* the STI hardware buffers the 48-bit count, but to
+        * break it out into two 32-bit access the registers
+        * must be accessed in a certain order.
+        * Always read STI_COUNT_H before STI_COUNT_L.
+        */
+       raw_spin_lock_irqsave(&p->lock, flags);
+       ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+       ticks |= em_sti_read(p, STI_COUNT_L);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+
+       /* mask compare A interrupt */
+       em_sti_write(p, STI_INTENCLR, 1);
+
+       /* update compare A value */
+       em_sti_write(p, STI_COMPA_H, next >> 32);
+       em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+       /* clear compare A interrupt source */
+       em_sti_write(p, STI_INTFFCLR, 1);
+
+       /* unmask compare A interrupt */
+       em_sti_write(p, STI_INTENSET, 1);
+
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+       struct em_sti_priv *p = dev_id;
+
+       p->ced.event_handler(&p->ced);
+       return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before;
+       int ret = 0;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       if (!used_before)
+               ret = em_sti_enable(p);
+
+       if (!ret)
+               p->active[user] = 1;
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before, used_after;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       p->active[user] = 0;
+       used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+       if (used_before && !used_after)
+               em_sti_disable(p);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+       return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+       return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+       int ret;
+       struct em_sti_priv *p = cs_to_em_sti(cs);
+
+       ret = em_sti_start(p, USER_CLOCKSOURCE);
+       if (!ret)
+               __clocksource_updatefreq_hz(cs, p->rate);
+       return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+       em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+       em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+       struct clocksource *cs = &p->cs;
+
+       memset(cs, 0, sizeof(*cs));
+       cs->name = dev_name(&p->pdev->dev);
+       cs->rating = 200;
+       cs->read = em_sti_clocksource_read;
+       cs->enable = em_sti_clocksource_enable;
+       cs->disable = em_sti_clocksource_disable;
+       cs->suspend = em_sti_clocksource_disable;
+       cs->resume = em_sti_clocksource_resume;
+       cs->mask = CLOCKSOURCE_MASK(48);
+       cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+       dev_info(&p->pdev->dev, "used as clock source\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->enable() */
+       clocksource_register_hz(cs, 1);
+       return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+       return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+                                   struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+
+       /* deal with old setting first */
+       switch (ced->mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+               em_sti_start(p, USER_CLOCKEVENT);
+               clockevents_config(&p->ced, p->rate);
+               break;
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       case CLOCK_EVT_MODE_UNUSED:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+                                  struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+       cycle_t next;
+       int safe;
+
+       next = em_sti_set_next(p, em_sti_count(p) + delta);
+       safe = em_sti_count(p) < (next - 1);
+
+       return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+       struct clock_event_device *ced = &p->ced;
+
+       memset(ced, 0, sizeof(*ced));
+       ced->name = dev_name(&p->pdev->dev);
+       ced->features = CLOCK_EVT_FEAT_ONESHOT;
+       ced->rating = 200;
+       ced->cpumask = cpumask_of(0);
+       ced->set_next_event = em_sti_clock_event_next;
+       ced->set_mode = em_sti_clock_event_mode;
+
+       dev_info(&p->pdev->dev, "used for clock events\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+       clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+       struct em_sti_priv *p;
+       struct resource *res;
+       int irq, ret;
+
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (p == NULL) {
+               dev_err(&pdev->dev, "failed to allocate driver data\n");
+               ret = -ENOMEM;
+               goto err0;
+       }
+
+       p->pdev = pdev;
+       platform_set_drvdata(pdev, p);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       /* map memory, let base point to the STI instance */
+       p->base = ioremap_nocache(res->start, resource_size(res));
+       if (p->base == NULL) {
+               dev_err(&pdev->dev, "failed to remap I/O memory\n");
+               ret = -ENXIO;
+               goto err0;
+       }
+
+       /* get hold of clock */
+       p->clk = clk_get(&pdev->dev, "sclk");
+       if (IS_ERR(p->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
+       }
+
+       if (request_irq(irq, em_sti_interrupt,
+                       IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                       dev_name(&pdev->dev), p)) {
+               dev_err(&pdev->dev, "failed to request low IRQ\n");
+               ret = -ENOENT;
+               goto err2;
+       }
+
+       raw_spin_lock_init(&p->lock);
+       em_sti_register_clockevent(p);
+       em_sti_register_clocksource(p);
+       return 0;
+
+err2:
+       clk_put(p->clk);
+err1:
+       iounmap(p->base);
+err0:
+       kfree(p);
+       return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+       return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+       { .compatible = "renesas,em-sti", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+       .probe          = em_sti_probe,
+       .remove         = __devexit_p(em_sti_remove),
+       .driver         = {
+               .name   = "em_sti",
+               .of_match_table = em_sti_dt_ids,
+       }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
index 32fe9ef5cc5c374d3a9dabf5c733aa17bd73342f..98b06baafcc64dd95c2a16965ea261210080c324 100644
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
-       spinlock_t lock;
+       raw_spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
 };
 
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
 
 #define CMSTR -1 /* shared register */
 #define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_cmt_lock, flags);
+       raw_spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read(p, CMSTR);
 
        if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_cmt_write(p, CMSTR, value);
-       spin_unlock_irqrestore(&sh_cmt_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 }
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
        __sh_cmt_set_next(p, delta);
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
        int ret = 0;
        unsigned long flags;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
 
        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(p, p->max_match_value);
  out:
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 
        return ret;
 }
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
        unsigned long flags;
        unsigned long f;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
 
        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(p, p->max_match_value);
 
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
        unsigned long value;
        int has_wrapped;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);
 
        if (unlikely(has_wrapped))
                raw += p->match_value + 1;
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 
        return value + raw;
 }
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                p->max_match_value = (1 << p->width) - 1;
 
        p->match_value = p->max_match_value;
-       spin_lock_init(&p->lock);
+       raw_spin_lock_init(&p->lock);
 
        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);
index a2172f6904180fd9f30d63cfd3cab2448ab9c856..d9b76ca64a611327c0ea79d979482f638e3c14d3 100644 (file)
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
        struct clock_event_device ced;
 };
 
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
 
 #define TSTR -1 /* shared register */
 #define TCR  0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_mtu2_lock, flags);
+       raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
        value = sh_mtu2_read(p, TSTR);
 
        if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_mtu2_write(p, TSTR, value);
-       spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
 }
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
index 97f54b634be43234d820beb1661b0493bd79637f..c1b51d49d106e90d8f4927cf174983878ad58b2a 100644 (file)
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
        struct clocksource cs;
 };
 
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
 
 #define TSTR -1 /* shared register */
 #define TCOR  0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_tmu_lock, flags);
+       raw_spin_lock_irqsave(&sh_tmu_lock, flags);
        value = sh_tmu_read(p, TSTR);
 
        if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_tmu_write(p, TSTR, value);
-       spin_unlock_irqrestore(&sh_tmu_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
 
        sh_tmu_enable(p);
 
-       /* TODO: calculate good shift from rate and counter bit width */
-
-       ced->shift = 32;
-       ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
-       ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
-       ced->min_delta_ns = 5000;
+       clockevents_config(ced, p->rate);
 
        if (periodic) {
                p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
        ced->set_mode = sh_tmu_clock_event_mode;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
-       clockevents_register_device(ced);
+
+       clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
 
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
index e23dc82d43acbb726c0825e3f259c53803cd3c58..7212961575770df8c17d00d666bd11a79553a0b2 100644 (file)
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index fb4f4990f5ebf9f6c8e1f97c9704f8b03642bd18..1dc2a4ad0026d21c43720941a2670cf0518c60e0 100644 (file)
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
 
        init_completion(&sdmac->done);
 
-       sdmac->buf_tail = 0;
-
        return 0;
 out:
 
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
        sdmac->flags = 0;
 
+       sdmac->buf_tail = 0;
+
        dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
                        sg_len, channel);
 
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 
        sdmac->status = DMA_IN_PROGRESS;
 
+       sdmac->buf_tail = 0;
+
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
        ret = sdma_load_context(sdmac);
index cbcc28e79be6331570af5ccd3132e12efe67aebc..e4feba6b03c00e6f2ae412a9a1104eef21fb94f4 100644 (file)
@@ -392,6 +392,8 @@ struct pl330_req {
        struct pl330_reqcfg *cfg;
        /* Pointer to first xfer in the request. */
        struct pl330_xfer *x;
+       /* Hook to attach to DMAC's list of reqs with due callback */
+       struct list_head rqd;
 };
 
 /*
@@ -461,8 +463,6 @@ struct _pl330_req {
        /* Number of bytes taken to setup MC for the req */
        u32 mc_len;
        struct pl330_req *r;
-       /* Hook to attach to DMAC's list of reqs with due callback */
-       struct list_head rqd;
 };
 
 /* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(const struct pl330_info *pi)
 {
-       struct _pl330_req *rqdone;
+       struct pl330_req *rqdone, *tmp;
        struct pl330_dmac *pl330;
        unsigned long flags;
        void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
                        if (active == -1) /* Aborted */
                                continue;
 
-                       rqdone = &thrd->req[active];
+                       /* Detach the req */
+                       rqdone = thrd->req[active].r;
+                       thrd->req[active].r = NULL;
+
                        mark_free(thrd, active);
 
                        /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
        }
 
        /* Now that we are in no hurry, do the callbacks */
-       while (!list_empty(&pl330->req_done)) {
-               struct pl330_req *r;
-
-               rqdone = container_of(pl330->req_done.next,
-                                       struct _pl330_req, rqd);
-
-               list_del_init(&rqdone->rqd);
-
-               /* Detach the req */
-               r = rqdone->r;
-               rqdone->r = NULL;
+       list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+               list_del(&rqdone->rqd);
 
                spin_unlock_irqrestore(&pl330->lock, flags);
-               _callback(r, PL330_ERR_NONE);
+               _callback(rqdone, PL330_ERR_NONE);
                spin_lock_irqsave(&pl330->lock, flags);
        }
 
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
        /* Pick up ripe tomatoes */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
-                       if (pch->cyclic)
+                       if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
                        list_move_tail(&desc->node, &list);
                }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
 }
 
 /* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 {
        struct dma_pl330_desc *desc;
        unsigned long flags;
index 10f375032e9686f7c719c857ceca5dd3f9ef81ed..de5ba86e8b8998df830a0e62647378e4667489f4 100644 (file)
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
        else
                return (char *)ptr;
 
-       r = size % align;
+       r = (unsigned long)p % align;
 
        if (r == 0)
                return (char *)ptr;
index d27778f65a5dc3d8f5731cc4e6c1cff7cc6f4a1c..a499c7ed820ae62d8fc489478ef8522e756d833b 100644 (file)
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
        if (mce->bank != 8)
                return NOTIFY_DONE;
 
-#ifdef CONFIG_SMP
-       /* Only handle if it is the right mc controller */
-       if (mce->socketid != pvt->i7core_dev->socket)
-               return NOTIFY_DONE;
-#endif
-
        smp_rmb();
        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
                smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
        if (pvt->enable_scrub)
                disable_sdram_scrub_setting(mci);
 
-       mce_unregister_decode_chain(&i7_mce_dec);
-
        /* Disable EDAC polling */
        i7core_pci_ctl_release(pvt);
 
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
        /* DCLK for scrub rate setting */
        pvt->dclk_freq = get_dclk_freq();
 
-       mce_register_decode_chain(&i7_mce_dec);
-
        return 0;
 
 fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
 
        pci_rc = pci_register_driver(&i7core_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&i7_mce_dec);
                return 0;
+       }
 
        i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&i7core_driver);
+       mce_unregister_decode_chain(&i7_mce_dec);
 }
 
 module_init(i7core_init);
index c6074c5cd1ef49bd492d2e94793686a2710d694f..8c87a5e870577c06b0082ca7d3b19eb7a81a252b 100644 (file)
@@ -5,8 +5,6 @@
 
 #include <asm/mce.h>
 
-#define BIT_64(n)                      (U64_C(1) << (n))
-
 #define EC(x)                          ((x) & 0xffff)
 #define XEC(x, mask)                   (((x) >> 16) & mask)
 
index 4c402353ba98d9aeceb5bbd8062eeec916f39d9d..0e374625f6f894a20272df947ffe2f3b7620422f 100644 (file)
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
-       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, mpc85xx_mc_err_probe);
                return -ENOMEM;
index 4adaf4b7da993c3d6b1f25d430dbdf1aa3e7a875..36ad17e79d6183e3f21bf63f3266dc169f3941a0 100644 (file)
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                pvt->is_close_pg = false;
        }
 
-       pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+       pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
        if (IS_RDIMM_ENABLED(reg)) {
                /* FIXME: Can also be LRDIMM */
                debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
        debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
                __func__, mci, &sbridge_dev->pdev[0]->dev);
 
-       mce_unregister_decode_chain(&sbridge_mce_dec);
-
        /* Remove MC sysfs nodes */
        edac_mc_del_mc(mci->dev);
 
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
                goto fail0;
        }
 
-       mce_register_decode_chain(&sbridge_mce_dec);
        return 0;
 
 fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
 
        pci_rc = pci_register_driver(&sbridge_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&sbridge_mce_dec);
                return 0;
+       }
 
        sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&sbridge_driver);
+       mce_unregister_decode_chain(&sbridge_mce_dec);
 }
 
 module_init(sbridge_init);
index 23416e443765ec0d1da62405518866a9e7851370..a4ed30bd9a4182c26168bd3bbc81b206641739a2 100644 (file)
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
        [5] = "Charge-downstream",
        [6] = "MHL",
        [7] = "Dock-desk",
-       [7] = "Dock-card",
-       [8] = "JIG",
+       [8] = "Dock-card",
+       [9] = "JIG",
 
        NULL,
 };
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
 
        extcon_dev_unregister(info->edev);
 
+       kfree(info->edev);
        kfree(info);
 
        return 0;
index f598a700ec15e06d808bd2e00b3bbe86c91be5a2..159aeb07b3baf332a9df0660f082ccfc283fca64 100644 (file)
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
 #if defined(CONFIG_ANDROID)
        if (switch_class)
                ret = class_compat_create_link(switch_class, edev->dev,
-                                              dev);
+                                              NULL);
 #endif /* CONFIG_ANDROID */
 
        spin_lock_init(&edev->lock);
index fe7a07b473363033246afb2437531547cf6bb979..8a0dcc11c7c73e9c1abb64b7537047d58115064b 100644 (file)
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_request_irq;
 
+       platform_set_drvdata(pdev, extcon_data);
        /* Perform initial detection */
        gpio_extcon_work(&extcon_data->work.work);
 
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
        struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&extcon_data->work);
+       free_irq(extcon_data->irq, extcon_data);
        gpio_free(extcon_data->gpio);
        extcon_dev_unregister(&extcon_data->edev);
        devm_kfree(&pdev->dev, extcon_data);
index c4067d0141f7c083ea9de58c72dc0ea3122ca300..542f0c04b6958037b3cf1c468e7705f779f8e11b 100644 (file)
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
 
 config GPIO_MSM_V1
        tristate "Qualcomm MSM GPIO v1"
-       depends on GPIOLIB && ARCH_MSM
+       depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
        help
          Say yes here to support the GPIO interface on ARM v6 based
          Qualcomm MSM chips.  Most of the pins on the MSM can be
index 9e9947cb86a3d4fe29bb018987176275d4476e2c..1077754f8289e37ca639541e8d9dd5936c858e7c 100644 (file)
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
 
        return 0;
 }
+EXPORT_SYMBOL(devm_gpio_request_one);
 
 /**
  *      devm_gpio_free - free an interrupt
index c337143b18f8f97d1fa80c61d84cfe9149b75aff..c89c4c1e668d97cf170f4fc07dc5911d83cb52d9 100644 (file)
@@ -398,10 +398,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
        writel(~0, port->base + GPIO_ISR);
 
        if (mxc_gpio_hwtype == IMX21_GPIO) {
-               /* setup one handler for all GPIO interrupts */
-               if (pdev->id == 0)
-                       irq_set_chained_handler(port->irq,
-                                               mx2_gpio_irq_handler);
+               /*
+                * Setup one handler for all GPIO interrupts. Actually setting
+                * the handler is needed only once, but doing it for every port
+                * is more robust and easier.
+                */
+               irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
        } else {
                /* setup one handler for each entry */
                irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
index c4ed1722734c9eadb190c0ccaa19a9c2eb3b2d39..4fbc208c32cfa213812f6356323a36a51c37087c 100644 (file)
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
                clk_enable(bank->dbck);
                bank->dbck_enabled = true;
+
+               __raw_writel(bank->dbck_enable_mask,
+                            bank->base + bank->regs->debounce_en);
        }
 }
 
 static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 {
        if (bank->dbck_enable_mask && bank->dbck_enabled) {
+               /*
+                * Disable debounce before cutting its clock. If debounce is
+                * enabled but the clock is not, the GPIO module seems unable
+                * to detect events and generate interrupts, at least on OMAP3.
+                */
+               __raw_writel(0, bank->base + bank->regs->debounce_en);
+
                clk_disable(bank->dbck);
                bank->dbck_enabled = false;
        }
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        bank->is_mpuio = pdata->is_mpuio;
        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
        bank->loses_context = pdata->loses_context;
-       bank->get_context_loss_count = pdata->get_context_loss_count;
        bank->regs = pdata->regs;
 #ifdef CONFIG_OF_GPIO
        bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
+       if (bank->loses_context)
+               bank->get_context_loss_count = pdata->get_context_loss_count;
+
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
index 7bb00448e13d2cd30140b06c9c1ca635708d9bbd..b6453d0e44add0846373be7aed37f1f41d66b06f 100644 (file)
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
        }
 
        /* need to set base address for gpc4 */
-       exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
+       exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
 
        /* need to set base address for gpx */
        chip = &exynos5_gpios_1[21];
index 38416be8ba1186c41b90b7dfd5df070ad3b7acee..6064fb376e11638f8900c6b6cfc9282900fff5ec 100644 (file)
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
        }
        spin_lock_init(&chip->lock);
        gsta_gpio_setup(chip);
-       for (i = 0; i < GSTA_NR_GPIO; i++)
-               gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+       if (gpio_pdata)
+               for (i = 0; i < GSTA_NR_GPIO; i++)
+                       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
 
        /* 384 was used in previous code: be compatible for other drivers */
        err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
index c1ad2884f2edb0b79f3894a9a16662e5776bda51..11f29c82253c4af5fca18b09f26923a8aa3a48a3 100644 (file)
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
        tps65910_gpio->gpio_chip.set    = tps65910_gpio_set;
        tps65910_gpio->gpio_chip.get    = tps65910_gpio_get;
        tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+       tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
        if (pdata && pdata->gpio_base)
                tps65910_gpio->gpio_chip.base = pdata->gpio_base;
        else
index 92ea5350dfe96dd6907ea235bb55b2d25faac08c..aa61ad2fcaaa6573e1de350a82d08151c0040289 100644 (file)
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
        struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
        struct wm8994 *wm8994 = wm8994_gpio->wm8994;
 
+       if (value)
+               value = WM8994_GPN_LVL;
+
        return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
-                              WM8994_GPN_DIR, 0);
+                              WM8994_GPN_DIR | WM8994_GPN_LVL, value);
 }
 
 static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index d7038230b71e7f113e8ae453bce0327528de575a..7053140c65969758f9f22ded7cea130fe0ab7bd5 100644 (file)
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
        {0,}
 };
 
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+       kfree(ap);
+}
+
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       cirrus_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 21bdfa8836f79e1da44efb3e09e3674b31aac661..64ea597cb6d390219abd198a086e131b08b6d54d 100644 (file)
@@ -145,7 +145,7 @@ struct cirrus_device {
                struct ttm_bo_device bdev;
                atomic_t validate_sequence;
        } ttm;
-
+       bool mm_inited;
 };
 
 
index 2ebcd11a5023089a3a7a8d890c08e3a673184860..50e170f879dece492968752212068ebec4ad14d3 100644 (file)
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
                                    pci_resource_len(dev->pdev, 0),
                                    DRM_MTRR_WC);
 
+       cirrus->mm_inited = true;
        return 0;
 }
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
        struct drm_device *dev = cirrus->dev;
+
+       if (!cirrus->mm_inited)
+               return;
+
        ttm_bo_device_release(&cirrus->ttm.bdev);
 
        cirrus_ttm_global_release(cirrus);
index 92cea9d77ec913e8bc21417dd07ff97e8d27cb9c..08a7aa722d6b8f0d798b7a59ccd5b8146183f497 100644 (file)
@@ -2116,7 +2116,7 @@ out:
        return ret;
 }
 
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
 {
        uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
@@ -2185,7 +2185,7 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
        }
 }
 
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 {
        int ret, hsub, vsub, num_planes, i;
 
@@ -3126,7 +3126,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
 static bool drm_property_change_is_valid(struct drm_property *property,
-                                        __u64 value)
+                                        uint64_t value)
 {
        if (property->flags & DRM_MODE_PROP_IMMUTABLE)
                return false;
@@ -3136,7 +3136,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
                return true;
        } else if (property->flags & DRM_MODE_PROP_BITMASK) {
                int i;
-               __u64 valid_mask = 0;
+               uint64_t valid_mask = 0;
                for (i = 0; i < property->num_values; i++)
                        valid_mask |= (1ULL << property->values[i]);
                return !(value & ~valid_mask);
index 608bddfc7e35ad93ebe7c522c2b2fb77cd8a44b6..a8743c399e83234c976ebdb4b471542a0645c42d 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING      (1 << 7)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -120,6 +122,9 @@ static struct edid_quirk {
        /* Samsung SyncMaster 22[5-6]BW */
        { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
        { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* ViewSonic VA2026w */
+       { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -144,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
 }
 EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+                "Minimum number of valid EDID header bytes (0-8, default 6)");
 
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
@@ -155,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
        u8 csum = 0;
        struct edid *edid = (struct edid *)raw_edid;
 
+       if (edid_fixup > 8 || edid_fixup < 0)
+               edid_fixup = 6;
+
        if (block == 0) {
                int score = drm_edid_header_is_valid(raw_edid);
                if (score == 8) ;
-               else if (score >= 6) {
+               else if (score >= edid_fixup) {
                        DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
                        memcpy(raw_edid, edid_header, sizeof(edid_header));
                } else {
@@ -598,7 +610,7 @@ static bool
 drm_monitor_supports_rb(struct edid *edid)
 {
        if (edid->revision >= 4) {
-               bool ret;
+               bool ret = false;
                drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
                return ret;
        }
@@ -885,12 +897,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                                "Wrong Hsync/Vsync pulse width\n");
                return NULL;
        }
+
+       if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+               mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+               if (!mode)
+                       return NULL;
+
+               goto set_size;
+       }
+
        mode = drm_mode_create(dev);
        if (!mode)
                return NULL;
 
-       mode->type = DRM_MODE_TYPE_DRIVER;
-
        if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
                timing->pixel_clock = cpu_to_le16(1088);
 
@@ -914,8 +933,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_do_interlace_quirk(mode, pt);
 
-       drm_mode_set_name(mode);
-
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
        }
@@ -925,6 +942,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
                DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
        mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
        mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -938,6 +956,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                mode->height_mm = edid->height_cm * 10;
        }
 
+       mode->type = DRM_MODE_TYPE_DRIVER;
+       drm_mode_set_name(mode);
+
        return mode;
 }
 
@@ -1018,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1027,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1067,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1095,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
index 420953197d0ae76c73aa5ef2cb4e5ff8271ee6f7..d6de2e07fa034ea33d796cd6ad97c5902e369924 100644 (file)
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
 };
 
 static struct drm_driver exynos_drm_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
-                                 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+                                       DRIVER_GEM | DRIVER_PRIME,
        .load                   = exynos_drm_load,
        .unload                 = exynos_drm_unload,
        .open                   = exynos_drm_open,
index 6e9ac7bd1dcf7bb2c2ad2883ea17c53c61459cfd..23d5ad379f86743a1be36effb214d43f0ff17456 100644 (file)
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
                manager_ops->commit(manager->dev);
 }
 
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-       return encoder->crtc;
-}
-
 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
        .dpms           = exynos_drm_encoder_dpms,
        .mode_fixup     = exynos_drm_encoder_mode_fixup,
        .mode_set       = exynos_drm_encoder_mode_set,
        .prepare        = exynos_drm_encoder_prepare,
        .commit         = exynos_drm_encoder_commit,
-       .get_crtc       = exynos_drm_encoder_get_crtc,
 };
 
 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
index f82a299553fb9278d8ebde3ede5382b521f9e525..4ccfe4328fab130ab1e7c1269ff793ac8a9ee004 100644 (file)
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+       unsigned int i;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        drm_framebuffer_cleanup(fb);
 
+       for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+               struct drm_gem_object *obj;
+
+               if (exynos_fb->exynos_gem_obj[i] == NULL)
+                       continue;
+
+               obj = &exynos_fb->exynos_gem_obj[i]->base;
+               drm_gem_object_unreference_unlocked(obj);
+       }
+
        kfree(exynos_fb);
        exynos_fb = NULL;
 }
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-ENOENT);
        }
 
-       drm_gem_object_unreference_unlocked(obj);
-
        fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-       if (IS_ERR(fb))
+       if (IS_ERR(fb)) {
+               drm_gem_object_unreference_unlocked(obj);
                return fb;
+       }
 
        exynos_fb = to_exynos_fb(fb);
        nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                        return ERR_PTR(-ENOENT);
                }
 
-               drm_gem_object_unreference_unlocked(obj);
-
                exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
        }
 
index 3ecb30d93552dbae84a7c555af2fda02ff754a1d..50823756cdea903ca4c94c95554a727c2328687d 100644 (file)
 static inline int exynos_drm_format_num_buffers(uint32_t format)
 {
        switch (format) {
-       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV12MT:
                return 2;
-       case DRM_FORMAT_YUV420M:
+       case DRM_FORMAT_YUV420:
                return 3;
        default:
                return 1;
index fc91293c456041ff995c4eb2eda340df7e39a2f5..5c8b683029ea64c0938c1054aa1157c4eb94e694 100644 (file)
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret = 0;
 
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       exynos_gem_obj = to_exynos_gem_obj(obj);
-
-       if (!exynos_gem_obj->base.map_list.map) {
-               ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+       if (!obj->map_list.map) {
+               ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }
 
-       *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
index 68ef010283751403135261cb1bed9f821c03c599..e2147a2ddcecab72570db6f3055aabdb18fe85ea 100644 (file)
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        switch (win_data->pixel_format) {
        case DRM_FORMAT_NV12MT:
                tiled_mode = true;
-       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12:
                crcb_mode = false;
                buf_num = 2;
                break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
        mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
 
        /* setting graphical layers */
-
        val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
        val |= MXR_GRP_CFG_WIN_BLEND_EN;
+       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
 
        /* the same configuration for both layers */
        mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
-       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
-       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
 
+       /* setting video layers */
+       val = MXR_GRP_CFG_ALPHA_VAL(0);
+       mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
        /* configuration of Video Processor Registers */
        vp_win_reset(ctx);
        vp_default_filter(res);
index 9764045428ce345a98721cc3e521804ab9b0e608..b7e7b49d8f627b77c290bf9491c87d508b7e3377 100644 (file)
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
        return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
 }
 
-static int cdv_get_brightness(struct backlight_device *bd)
-{
-       struct drm_device *dev = bl_get_data(bd);
-       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-
-       if (cdv_backlight_combination_mode(dev)) {
-               u8 lbpc;
-
-               val &= ~1;
-               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
-               val *= lbpc;
-       }
-       return val;
-}
-
 static u32 cdv_get_max_backlight(struct drm_device *dev)
 {
        u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
        return max;
 }
 
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(bd);
+       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+       if (cdv_backlight_combination_mode(dev)) {
+               u8 lbpc;
+
+               val &= ~1;
+               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+               val *= lbpc;
+       }
+       return (val * 100)/cdv_get_max_backlight(dev);
+
+}
+
 static int cdv_set_brightness(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
        if (level < 1)
                level = 1;
 
+       level *= cdv_get_max_backlight(dev);
+       level /= 100;
+
        if (cdv_backlight_combination_mode(dev)) {
                u32 max = cdv_get_max_backlight(dev);
                u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
 
        cdv_backlight_device->props.brightness =
                        cdv_get_brightness(cdv_backlight_device);
-       cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
        backlight_update_status(cdv_backlight_device);
        dev_priv->backlight_device = cdv_backlight_device;
        return 0;
index 4f186eca3a3039dc804775a80e55f461c2ff7792..c430bd424681c0b28625972a796aeb96b0524423 100644 (file)
@@ -144,6 +144,8 @@ struct opregion_asle {
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+static struct psb_intel_opregion *system_opregion;
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct opregion_asle *asle = dev_priv->opregion.asle;
 
-       if (asle) {
+       if (asle && system_opregion) {
                /* Don't do this on Medfield or other non PC like devices, they
                   use the bit for something different altogether */
                psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
 
-static struct psb_intel_opregion *system_opregion;
 
 static int psb_intel_opregion_video_event(struct notifier_block *nb,
                                          unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
                system_opregion = opregion;
                register_acpi_notifier(&psb_intel_opregion_notifier);
        }
-
-       if (opregion->asle)
-               psb_intel_opregion_enable_asle(dev);
 }
 
 void psb_intel_opregion_fini(struct drm_device *dev)
index 72dc6b9212656d67a4356149aede32c5691d4e60..4a90f8b0e16cb122ac0ebd62c6b8ddfcd9df9783 100644 (file)
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
 extern void psb_intel_opregion_init(struct drm_device *dev);
 extern void psb_intel_opregion_fini(struct drm_device *dev);
 extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
 
 #else
 
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
 {
        return 0;
 }
+
+extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
 #endif
index eff039bf92d40ae37fa1481cb5ec7006036ffae4..5971bc82b765cd87e6b32a347719ee3a5a218a64 100644 (file)
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
        psb_backlight_device->props.max_brightness = 100;
        backlight_update_status(psb_backlight_device);
        dev_priv->backlight_device = psb_backlight_device;
+
+       /* This must occur after the backlight is properly initialised */
+       psb_lid_timer_init(dev_priv);
+
        return 0;
 }
 
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
        return 0;
 }
 
-/* Not exactly an erratum more an irritation */
-static void psb_chip_errata(struct drm_device *dev)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       psb_lid_timer_init(dev_priv);
-}
-
 static void psb_chip_teardown(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
        .sgx_offset = PSB_SGX_OFFSET,
        .chip_setup = psb_chip_setup,
        .chip_teardown = psb_chip_teardown,
-       .errata = psb_chip_errata,
 
        .crtc_helper = &psb_intel_helper_funcs,
        .crtc_funcs = &psb_intel_crtc_funcs,
index caba6e08693cb83119f3f22e32ba8f14e1b4fbdd..a8858a907f47b8ce2944157bbbd6fb1d3b562a10 100644 (file)
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
        if (ret)
                return ret;
+       psb_intel_opregion_enable_asle(dev);
 #if 0
        /*enable runtime pm at last*/
        pm_runtime_enable(&dev->pdev->dev);
index f920fb5e42b63846e3d8b7b782b492e547e18eef..fa9439159ebd6bc85cdf4e27a307d9cde12dcbd6 100644 (file)
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
                return -EINVAL;
 
        /* This is all entirely broken */
-       down_write(&current->mm->mmap_sem);
        old_fops = file_priv->filp->f_op;
        file_priv->filp->f_op = &i810_buffer_fops;
        dev_priv->mmap_buffer = buf;
-       buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+       buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
                                            PROT_READ | PROT_WRITE,
                                            MAP_SHARED, buf->bus_address);
        dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
                retcode = PTR_ERR(buf_priv->virtual);
                buf_priv->virtual = NULL;
        }
-       up_write(&current->mm->mmap_sem);
 
        return retcode;
 }
index eb2b3c25b9e12b19c2113444d5d2f8e175756ba6..5363e9c66c27e4fe1a4be0fecf26dd4263920be6 100644 (file)
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
                                 1, minor);
+       drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+                                1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
index f94792626b94fadae47303f150e7aff278d371ef..36822b924eb12974f7c73af8fe8368707ab08abf 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index 238a521658330bbf7468b95b351148436d383602..9fe9ebe52a7ade8424472b4388b903afb5f681c0 100644 (file)
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights");
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       (((dev_priv)->info->gen >= 6) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE)) && \
-       (!IS_VALLEYVIEW((dev_priv)->dev))
+       ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
index 377c21f531e49ba93bdcb31aaaff290b15292bc2..b0b676abde0de1314f1181f79f1667d35f8eee67 100644 (file)
@@ -285,6 +285,7 @@ struct intel_device_info {
        u8 is_ivybridge:1;
        u8 is_valleyview:1;
        u8 has_pch_split:1;
+       u8 has_force_wake:1;
        u8 is_haswell:1;
        u8 has_fbc:1;
        u8 has_pipe_cxsr:1;
@@ -942,6 +943,9 @@ struct drm_i915_gem_object {
 
        /* prime dma-buf support */
        struct sg_table *sg_table;
+       void *dma_buf_vmapping;
+       int vmapping_count;
+
        /**
         * Used for performing relocations during execbuffer insertion.
         */
@@ -1098,6 +1102,8 @@ struct drm_i915_file_private {
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
 #include "i915_trace.h"
 
 /**
index c1e5c66553dfcff66892876f854f3e708d92ac93..288d7b8f49ae48858a30c6ad1f9f7ce1d6d6e600 100644 (file)
@@ -2063,10 +2063,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (obj->gtt_space == NULL)
                return 0;
 
-       if (obj->pin_count != 0) {
-               DRM_ERROR("Attempting to unbind pinned buffer\n");
-               return -EINVAL;
-       }
+       if (obj->pin_count)
+               return -EBUSY;
 
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
@@ -3293,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
+       u32 mask;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3303,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+       if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+               /* 965gm cannot relocate objects above 4GiB. */
+               mask &= ~__GFP_HIGHMEM;
+               mask |= __GFP_DMA32;
+       }
+
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+       mapping_set_gfp_mask(mapping, mask);
 
        i915_gem_info_add_obj(dev_priv, size);
 
index 8e269178d6a5a65c2f5ab8068ac86d283711fab6..aa308e1337db7c8bacbf73d00dc80badecb617a2 100644 (file)
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
        }
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->base.dev;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (obj->dma_buf_vmapping) {
+               obj->vmapping_count++;
+               goto out_unlock;
+       }
+
+       if (!obj->pages) {
+               ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+       if (!obj->dma_buf_vmapping) {
+               DRM_ERROR("failed to vmap object\n");
+               goto out_unlock;
+       }
+
+       obj->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->base.dev;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return;
+
+       --obj->vmapping_count;
+       if (obj->vmapping_count == 0) {
+               vunmap(obj->dma_buf_vmapping);
+               obj->dma_buf_vmapping = NULL;
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
        return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+       .mmap = i915_gem_dmabuf_mmap,
+       .vmap = i915_gem_dmabuf_vmap,
+       .vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
index cc4a633076110bce0f844d1c24e8c739c9b6917c..ed3224c3742389c6d9709039e1778cb806dc69a2 100644 (file)
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps_work);
-       u8 new_delay = dev_priv->cur_delay;
        u32 pm_iir, pm_imr;
+       u8 new_delay;
 
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps_lock);
 
-       if (!pm_iir)
+       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->dev->struct_mutex);
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
-       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-               gen6_gt_force_wake_get(dev_priv);
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->min_delay) {
-                       new_delay = dev_priv->min_delay;
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                  ((new_delay << 16) & 0x3f0000));
-               } else {
-                       /* Make sure we continue to get down interrupts
-                        * until we hit the minimum frequency */
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-               }
-               gen6_gt_force_wake_put(dev_priv);
-       }
+
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+               new_delay = dev_priv->cur_delay + 1;
+       else
+               new_delay = dev_priv->cur_delay - 1;
 
        gen6_set_rps(dev_priv->dev, new_delay);
-       dev_priv->cur_delay = new_delay;
 
-       /*
-        * rps_lock not held here because clearing is non-destructive. There is
-        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-        * by holding struct_mutex for the duration of the write.
-        */
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
@@ -435,7 +412,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
         */
 
        spin_lock_irqsave(&dev_priv->rps_lock, flags);
-       WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
@@ -533,7 +509,7 @@ out:
        return ret;
 }
 
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -573,6 +549,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+
+       if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+               DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+                                (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+                                SDE_AUDIO_POWER_SHIFT_CPT);
+
+       if (pch_iir & SDE_AUX_MASK_CPT)
+               DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+       if (pch_iir & SDE_GMBUS_CPT)
+               DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+       if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+               DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+       if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+               DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+       if (pch_iir & SDE_FDI_MASK_CPT)
+               for_each_pipe(pipe)
+                       DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+                                        pipe_name(pipe),
+                                        I915_READ(FDI_RX_IIR(pipe)));
+}
+
 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -614,7 +619,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 
                        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-                       pch_irq_handler(dev, pch_iir);
+                       cpt_irq_handler(dev, pch_iir);
 
                        /* clear PCH hotplug event before clear CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
@@ -707,7 +712,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
        if (de_iir & DE_PCH_EVENT) {
                if (pch_iir & hotplug_mask)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-               pch_irq_handler(dev, pch_iir);
+               if (HAS_PCH_CPT(dev))
+                       cpt_irq_handler(dev, pch_iir);
+               else
+                       ibx_irq_handler(dev, pch_iir);
        }
 
        if (de_iir & DE_PCU_EVENT) {
index 2d49b9507ed05b2f0693e3c52a22b36dc028b3da..48d5e8e051cf6a1057f247457e65045e679051ab 100644 (file)
 #define MI_DISPLAY_FLIP                MI_INSTR(0x14, 2)
 #define MI_DISPLAY_FLIP_I915   MI_INSTR(0x14, 1)
 #define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
 #define MI_SET_CONTEXT         MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT              (1<<8)
 #define   MI_MM_SPACE_PHYSICAL         (0<<8)
 
 /* PCH */
 
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
 #define SDE_AUDIO_POWER_D      (1 << 27)
 #define SDE_AUDIO_POWER_C      (1 << 26)
 #define SDE_AUDIO_POWER_B      (1 << 25)
 #define SDE_TRANSA_CRC_ERR     (1 << 1)
 #define SDE_TRANSA_FIFO_UNDER  (1 << 0)
 #define SDE_TRANS_MASK         (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT    (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT  (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT  (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT  (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT   29
+#define SDE_AUDIO_POWER_MASK_CPT    (7 << 29)
+#define SDE_AUXD_CPT           (1 << 27)
+#define SDE_AUXC_CPT           (1 << 26)
+#define SDE_AUXB_CPT           (1 << 25)
+#define SDE_AUX_MASK_CPT       (7 << 25)
 #define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT    (1 << 19)
 #define SDE_HOTPLUG_MASK_CPT   (SDE_CRT_HOTPLUG_CPT |          \
                                 SDE_PORTD_HOTPLUG_CPT |        \
                                 SDE_PORTC_HOTPLUG_CPT |        \
                                 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT          (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT                (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT                (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT                (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT   (SDE_AUDIO_CP_REQ_C_CPT | \
+                                SDE_AUDIO_CP_REQ_B_CPT | \
+                                SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT   (SDE_AUDIO_CP_CHG_C_CPT | \
+                                SDE_AUDIO_CP_CHG_B_CPT | \
+                                SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT       (SDE_FDI_RXC_CPT | \
+                                SDE_FDI_RXB_CPT | \
+                                SDE_FDI_RXA_CPT)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
index 0ede02a99d914544d145b043b1ea066b5254bb02..a748e5cabe14e1e3ca4550db29078e0c45464682 100644 (file)
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
                I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+                * otherwise we get a blank eDP screen after S3 on some machines
+                */
                I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
index ee61ad1e642b06848f537fb6000b3d83c3bf1be1..a8538ac0299dac4252394b1fc28d9c3f58e173cc 100644 (file)
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-                          struct intel_crtc *intel_crtc, bool state)
+                          struct intel_pch_pll *pll,
+                          struct intel_crtc *crtc,
+                          bool state)
 {
-       int reg;
        u32 val;
        bool cur_state;
 
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
                return;
        }
 
-       if (!intel_crtc->pch_pll) {
-               WARN(1, "asserting PCH PLL enabled with no PLL\n");
+       if (WARN (!pll,
+                 "asserting PCH PLL %s with no PLL\n", state_string(state)))
                return;
-       }
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       val = I915_READ(pll->pll_reg);
+       cur_state = !!(val & DPLL_VCO_ENABLE);
+       WARN(cur_state != state,
+            "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+            pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+       /* Make sure the selected PLL is correctly attached to the transcoder */
+       if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
                u32 pch_dpll;
 
                pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-               /* Make sure the selected PLL is enabled to the transcoder */
-               WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-                    "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+               cur_state = pll->pll_reg == _PCH_DPLL_B;
+               if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+                         "PLL[%d] not attached to this transcoder %d: %08x\n",
+                         cur_state, crtc->pipe, pch_dpll)) {
+                       cur_state = !!(val >> (4*crtc->pipe + 3));
+                       WARN(cur_state != state,
+                            "PLL[%d] not %s on this transcoder %d: %08x\n",
+                            pll->pll_reg == _PCH_DPLL_B,
+                            state_string(state),
+                            crtc->pipe,
+                            val);
+               }
        }
-
-       reg = intel_crtc->pch_pll->pll_reg;
-       val = I915_READ(reg);
-       cur_state = !!(val & DPLL_VCO_ENABLE);
-       WARN(cur_state != state,
-            "PCH PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
        assert_pch_refclk_enabled(dev_priv);
 
        if (pll->active++ && pll->on) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
                      intel_crtc->base.base.id);
 
        if (WARN_ON(pll->active == 0)) {
-               assert_pch_pll_disabled(dev_priv, intel_crtc);
+               assert_pch_pll_disabled(dev_priv, pll, NULL);
                return;
        }
 
        if (--pll->active) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
        BUG_ON(dev_priv->info->gen < 5);
 
        /* Make sure PCH DPLL is enabled */
-       assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+       assert_pch_pll_enabled(dev_priv,
+                              to_intel_crtc(crtc)->pch_pll,
+                              to_intel_crtc(crtc));
 
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
@@ -6148,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       uint32_t plane_bit = 0;
        int ret;
 
        ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
        if (ret)
                goto err;
 
+       switch(intel_crtc->plane) {
+       case PLANE_A:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+               break;
+       case PLANE_B:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+               break;
+       case PLANE_C:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+               break;
+       default:
+               WARN_ONCE(1, "unknown plane in flip command\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
        ret = intel_ring_begin(ring, 4);
        if (ret)
                goto err_unpin;
 
-       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
@@ -6531,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);
 
-               if (I915_READ(HDMID) & PORT_DETECTED)
+               if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);
 
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6894,19 +6921,6 @@ static void i915_disable_vga(struct drm_device *dev)
        POSTING_READ(vga_reg);
 }
 
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * IVB has CPU eDP backlight regs too, set things up to let the
-        * PCH regs control the backlight
-        */
-       I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
-       I915_WRITE(BLC_PWM_CPU_CTL, 0);
-       I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6923,9 +6937,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }
-
-       if (IS_IVYBRIDGE(dev))
-               ivb_pch_pwm_override(dev);
 }
 
 void intel_modeset_init(struct drm_device *dev)
index 71c7096e386950f5fc8f2fbad253dc80cd764e16..c0449324143cb62f1ecd837e042f28af39682d77 100644 (file)
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
        struct drm_display_mode *panel_fixed_mode;  /* for eDP */
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       struct edid *edid; /* cached EDID for eDP */
+       int edid_mode_count;
 };
 
 /**
@@ -266,6 +269,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return MODE_H_ILLEGAL;
+
        return MODE_OK;
 }
 
@@ -368,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        int recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
-       int try, precharge = 5;
+       int try, precharge;
 
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
@@ -388,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
 
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
+
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ(ch_ctl);
@@ -702,6 +713,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                mode->clock = intel_dp->panel_fixed_mode->clock;
        }
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return false;
+
        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1168,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-       ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
        pp = ironlake_get_pp_control(dev_priv);
-       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
 
@@ -1266,18 +1279,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+
+       /* Make sure the panel is off before trying to change the mode. But also
+        * ensure that we have vdd while we switch off the panel. */
+       ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        ironlake_edp_panel_off(intel_dp);
 
-       /* Wake up the sink first */
-       ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_link_down(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, false);
-
-       /* Make sure the panel is off before trying to
-        * change the mode
-        */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1320,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (mode != DRM_MODE_DPMS_ON) {
+               /* Switching the panel off requires vdd. */
+               ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
                ironlake_edp_panel_off(intel_dp);
 
-               ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1969,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
+       ironlake_edp_panel_vdd_on(intel_dp);
+
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
@@ -1976,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
+
+       ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -2112,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct edid     *edid;
+       int size;
+
+       if (is_edp(intel_dp)) {
+               if (!intel_dp->edid)
+                       return NULL;
+
+               size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+               edid = kmalloc(size, GFP_KERNEL);
+               if (!edid)
+                       return NULL;
+
+               memcpy(edid, intel_dp->edid, size);
+               return edid;
+       }
 
-       ironlake_edp_panel_vdd_on(intel_dp);
        edid = drm_get_edid(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return edid;
 }
 
@@ -2125,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int     ret;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       if (is_edp(intel_dp)) {
+               drm_mode_connector_update_edid_property(connector,
+                                                       intel_dp->edid);
+               ret = drm_add_edid_modes(connector, intel_dp->edid);
+               drm_edid_to_eld(connector,
+                               intel_dp->edid);
+               connector->display_info.raw_edid = NULL;
+               return intel_dp->edid_mode_count;
+       }
+
        ret = intel_ddc_get_modes(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return ret;
 }
 
@@ -2317,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
+               kfree(intel_dp->edid);
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
@@ -2500,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        break;
        }
 
+       intel_dp_i2c_init(intel_dp, intel_connector, name);
+
        /* Cache some DPCD data in the eDP case */
        if (is_edp(intel_dp)) {
                bool ret;
                struct edp_power_seq    cur, vbt;
                u32 pp_on, pp_off, pp_div;
+               struct edid *edid;
 
                pp_on = I915_READ(PCH_PP_ON_DELAYS);
                pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2572,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        intel_dp_destroy(&intel_connector->base);
                        return;
                }
-       }
 
-       intel_dp_i2c_init(intel_dp, intel_connector, name);
+               ironlake_edp_panel_vdd_on(intel_dp);
+               edid = drm_get_edid(connector, &intel_dp->adapter);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                               edid);
+                       intel_dp->edid_mode_count =
+                               drm_add_edid_modes(connector, edid);
+                       drm_edid_to_eld(connector, edid);
+                       intel_dp->edid = edid;
+               }
+               ironlake_edp_panel_vdd_off(intel_dp, false);
+       }
 
        intel_encoder->hot_plug = intel_dp_hot_plug;
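
The intel_dp.c hunks above cache the panel EDID once at init time, while VDD is forced on, and hand out copies afterwards so ordinary probes no longer need to power-cycle VDD. A minimal sketch of just the copy step, assuming the usual drm_edid.h definitions (EDID_LENGTH is one 128-byte block, struct edid::extensions counts the extra blocks); the helper name edid_dup() is illustrative and not part of the patch:

static struct edid *edid_dup(const struct edid *src)
{
        /* base block plus one EDID_LENGTH block per extension */
        size_t size = (src->extensions + 1) * EDID_LENGTH;
        struct edid *copy = kmalloc(size, GFP_KERNEL);

        if (copy)
                memcpy(copy, src, size);
        return copy;
}
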
 
index 4a9707dd0f9c1885644b9847cfecd062a8bd8872..1991a4408cf9e10896bd5295ba4b3b4b1d74856b 100644 (file)
@@ -396,11 +396,22 @@ clear_err:
         * Wait for bus to IDLE before clearing NAK.
         * If we clear the NAK while bus is still active, then it will stay
         * active and the next transaction may fail.
+        *
+        * If no ACK is received during the address phase of a transaction, the
+        * adapter must report -ENXIO. It is not clear what to return if no ACK
+        * is received at other times. But we have to be careful to not return
+        * spurious -ENXIO because that will prevent i2c and drm edid functions
+        * from retrying. So return -ENXIO only when gmbus properly quiesces -
+        * timing out seems to happen when there _is_ a ddc chip present, but
+        * it's slow to respond and only answers on the 2nd retry.
         */
+       ret = -ENXIO;
        if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-                    10))
+                    10)) {
                DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
                              adapter->name);
+               ret = -ETIMEDOUT;
+       }
 
        /* Toggle the Software Clear Interrupt bit. This has the effect
         * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
                         adapter->name, msgs[i].addr,
                         (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-       /*
-        * If no ACK is received during the address phase of a transaction,
-        * the adapter must report -ENXIO.
-        * It is not clear what to return if no ACK is received at other times.
-        * So, we always return -ENXIO in all NAK cases, to ensure we send
-        * it at least during the one case that is specified.
-        */
-       ret = -ENXIO;
        goto out;
 
 timeout:
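
The comment block added above boils down to a two-way error-code policy after a NAK. A condensed, illustrative restatement (the helper name is hypothetical, not part of the patch):

/* After a NAK: a bus that went idle within the timeout really had no
 * responder at that address (-ENXIO); a bus that stayed active suggests a
 * present but slow DDC chip, so report -ETIMEDOUT and let the EDID code
 * retry instead of treating it as "no device". */
static int gmbus_nak_errno(bool bus_went_idle)
{
        return bus_went_idle ? -ENXIO : -ETIMEDOUT;
}
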
index 9dee82350defb0590ecc16326a4cdd712d15b0e4..08eb04c787e834e52e7850b7b864f1901d684f31 100644 (file)
@@ -745,6 +745,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Hewlett-Packard HP t5740e Thin Client",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+               },
+       },
        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Hewlett-Packard t5745",
index 8e79ff67ec98931e6a7680d8b9e28375d8db1978..d0ce2a5b1d3f09ffa65340026388f5b5920e320a 100644 (file)
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 swreq;
+       u32 limits;
 
-       swreq = (val & 0x3ff) << 25;
-       I915_WRITE(GEN6_RPNSWREQ, swreq);
+       limits = 0;
+       if (val >= dev_priv->max_delay)
+               val = dev_priv->max_delay;
+       else
+               limits |= dev_priv->max_delay << 24;
+
+       if (val <= dev_priv->min_delay)
+               val = dev_priv->min_delay;
+       else
+               limits |= dev_priv->min_delay << 16;
+
+       if (val == dev_priv->cur_delay)
+               return;
+
+       I915_WRITE(GEN6_RPNSWREQ,
+                  GEN6_FREQUENCY(val) |
+                  GEN6_OFFSET(0) |
+                  GEN6_AGGRESSIVE_TURBO);
+
+       /* Make sure we continue to get interrupts
+        * until we hit the minimum or maximum frequencies.
+        */
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+       dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
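
The reworked gen6_set_rps() above both clamps the requested step to [min_delay, max_delay] and programs GEN6_RP_INTERRUPT_LIMITS so that, per the new comment, up/down interrupts keep arriving until the ceiling or floor is reached. A self-contained restatement of that computation, assuming only that the delays fit the 8-bit fields used above (the standalone helper is illustrative, not part of the patch):

static u32 rps_limits_and_clamp(u8 *val, u8 min_delay, u8 max_delay)
{
        u32 limits = 0;

        if (*val >= max_delay)
                *val = max_delay;               /* clamp to the ceiling */
        else
                limits |= (u32)max_delay << 24; /* keep "up" interrupts coming until max */

        if (*val <= min_delay)
                *val = min_delay;               /* clamp to the floor */
        else
                limits |= (u32)min_delay << 16; /* keep "down" interrupts coming until min */

        return limits;                          /* value for GEN6_RP_INTERRUPT_LIMITS */
}
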
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        struct intel_ring_buffer *ring;
-       u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-       u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+       u32 rp_state_cap;
+       u32 gt_perf_status;
        u32 pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
-       int cur_freq, min_freq, max_freq;
        int rc6_mode;
        int i;
 
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        gen6_gt_force_wake_get(dev_priv);
 
+       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+       /* In units of 100MHz */
+       dev_priv->max_delay = rp_state_cap & 0xff;
+       dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+       dev_priv->cur_delay = 0;
+
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  18 << 24 |
-                  6 << 16);
+                  dev_priv->max_delay << 24 |
+                  dev_priv->min_delay << 16);
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
        I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_MODE |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-       min_freq = (rp_state_cap & 0xff0000) >> 16;
-       max_freq = rp_state_cap & 0xff;
-       cur_freq = (gt_perf_status & 0xff00) >> 8;
-
        /* Check for overclock support */
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
        if (pcu_mbox & (1<<31)) { /* OC supported */
-               max_freq = pcu_mbox & 0xff;
+               dev_priv->max_delay = pcu_mbox & 0xff;
                DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
        }
 
-       /* In units of 100MHz */
-       dev_priv->max_delay = max_freq;
-       dev_priv->min_delay = min_freq;
-       dev_priv->cur_delay = cur_freq;
+       gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
                limits |= (dev_priv->min_delay & 0x3f) << 16;
 
        if (old != limits) {
-               DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-                         limits, old);
+               /* Note that the known failure case is to read back 0. */
+               DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+                                "expected %08x, was %08x\n", limits, old);
                I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
        }
 
index b59b6d5b75833e37e204da899c4dda2f65a9bf2d..e5b84ff89ca58234b5c7e0d30fa8e97583d6357d 100644 (file)
@@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
+       int ret = 0;
        u32 head;
 
+       if (HAS_FORCE_WAKE(dev))
+               gen6_gt_force_wake_get(dev_priv);
+
        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
@@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
+               ring->last_retired_head = -1;
        }
 
-       return 0;
+out:
+       if (HAS_FORCE_WAKE(dev))
+               gen6_gt_force_wake_put(dev_priv);
+
+       return ret;
 }
 
 static int
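
init_ring_common() above now brackets all of its ring-register accesses with gen6_gt_force_wake_get()/put() on platforms with HAS_FORCE_WAKE, and routes the error path through a single out label so the reference is always dropped. A stripped-down sketch of just that bracketing pattern; the wrapper and its function-pointer argument are illustrative only:

static int with_forcewake(struct drm_device *dev,
                          drm_i915_private_t *dev_priv,
                          int (*do_mmio_setup)(struct drm_device *dev))
{
        int ret;

        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_get(dev_priv);

        /* every register poke happens while the GT is guaranteed awake */
        ret = do_mmio_setup(dev);

        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_put(dev_priv);

        return ret;
}
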
@@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unref;
 
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret)
+               goto err_unpin;
+
        ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
                                         ring->size);
        if (ring->virtual_start == NULL) {
index a949b73880c8302db5f3b255429cf24ab12fea8d..b6a9d45fc3c69d4b5be7e8c6f93490636b6049c2 100644 (file)
@@ -783,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                ((v_sync_len & 0x30) >> 4);
 
        dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
        if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               dtd->part2.dtd_flags |= 0x2;
+               dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               dtd->part2.dtd_flags |= 0x4;
+               dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
        dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
        mode->clock = dtd->part1.clock * 10;
 
        mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-       if (dtd->part2.dtd_flags & 0x2)
+       if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       if (dtd->part2.dtd_flags & 0x4)
+       if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
index 6b7b22f4d63ec77c05ebf70ee35110d9678b841e..9d030142ee43476c89cf2769f0d8c75cf1697daa 100644 (file)
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
        u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE     (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
        struct {
index 3346612d2953eff35c3e9025b044d33e75e28206..a233a51fd7e60c9f48ea89fd9b425140f4bf0ceb 100644 (file)
@@ -673,6 +673,54 @@ static const struct tv_mode tv_modes[] = {
 
                .filter_table = filter_table,
        },
+       {
+               .name       = "480p",
+               .clock          = 107520,
+               .refresh        = 59940,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 122,
+               .hblank_start   = 842,              .htotal             = 857,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+               .vsync_len      = 12,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 479,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "576p",
+               .clock          = 107520,
+               .refresh        = 50000,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 139,
+               .hblank_start   = 859,              .htotal             = 863,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 48,               .vi_end_f2          = 48,
+               .nbr_end        = 575,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
        {
                .name       = "720p@60Hz",
                .clock          = 148800,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
        I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
        I915_WRITE(TV_CTL, save_tv_ctl);
+       POSTING_READ(TV_CTL);
+
+       /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+       intel_wait_for_vblank(intel_tv->base.base.dev,
+                             to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
index 3c8e04f54713b7964c5c915f3b06ca1a70f54578..93e832d6c3286346eff00f179fcc49e5caeba473 100644 (file)
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+       kfree(ap);
+}
+
+
 static int __devinit
 mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       mgag200_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 634d222c93dea4b310183a1174c474cd2f80a2b8..8613cb23808c585ef35175f4d4305f1c739d1add 100644 (file)
@@ -123,6 +123,9 @@ struct nouveau_bo {
 
        struct drm_gem_object *gem;
        int pin_refcnt;
+
+       struct ttm_bo_kmap_obj dma_buf_vmap;
+       int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo)                           \
index 153b9a15469b5053507507587bd20dc411417146..1074bc5dd418e3a33b9214969dd850d0f230464d 100644 (file)
@@ -467,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
        nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
        ret = drm_fb_helper_init(dev, &nfbdev->helper,
-                                nv_two_heads(dev) ? 2 : 1, 4);
+                                dev->mode_config.num_crtc, 4);
        if (ret) {
                kfree(nfbdev);
                return ret;
index c58aab7370c575949a41113014120b252548169c..a25cf2cb931f104e7ce69c3de13696635290d196 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
 
 #include "drmP.h"
 #include "drm.h"
@@ -61,6 +84,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+       struct nouveau_bo *nvbo = dma_buf->priv;
+       struct drm_device *dev = nvbo->gem->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       if (nvbo->vmapping_count) {
+               nvbo->vmapping_count++;
+               goto out_unlock;
+       }
+
+       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+                         &nvbo->dma_buf_vmap);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ERR_PTR(ret);
+       }
+       nvbo->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct nouveau_bo *nvbo = dma_buf->priv;
+       struct drm_device *dev = nvbo->gem->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       nvbo->vmapping_count--;
+       if (nvbo->vmapping_count == 0) {
+               ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops =  {
        .map_dma_buf = nouveau_gem_map_dma_buf,
        .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +134,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops =  {
        .kmap_atomic = nouveau_gem_kmap_atomic,
        .kunmap = nouveau_gem_kunmap,
        .kunmap_atomic = nouveau_gem_kunmap_atomic,
+       .mmap = nouveau_gem_prime_mmap,
+       .vmap = nouveau_gem_prime_vmap,
+       .vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
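
The new vmap/vunmap hooks above give nouveau a reference-counted kernel mapping of an exported buffer: the first vmap call kmaps the TTM BO, later callers only bump vmapping_count, and the mapping is torn down when the count drops back to zero. A minimal, hedged sketch of how an importer of that era would reach these hooks through the generic dma-buf API (error handling simplified; every name other than dma_buf_vmap/dma_buf_vunmap is illustrative):

#include <linux/dma-buf.h>
#include <linux/err.h>

static int peek_first_byte(struct dma_buf *buf, u8 *out)
{
        void *vaddr = dma_buf_vmap(buf);        /* dispatches to nouveau_gem_prime_vmap() */

        if (IS_ERR_OR_NULL(vaddr))
                return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

        *out = *(u8 *)vaddr;
        dma_buf_vunmap(buf, vaddr);             /* drops vmapping_count, unmaps at zero */
        return 0;
}
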
index 01d77d1554f4258899da5e14eb0044e3d529cc02..3904d7964a4b02e3b0e34930f3bc97a65c07add3 100644 (file)
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        }
 
        if (tiling_flags & RADEON_TILING_MACRO) {
-               if (rdev->family >= CHIP_CAYMAN)
+               if (rdev->family >= CHIP_TAHITI)
+                       tmp = rdev->config.si.tile_config;
+               else if (rdev->family >= CHIP_CAYMAN)
                        tmp = rdev->config.cayman.tile_config;
                else
                        tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
+       if ((rdev->family == CHIP_TAHITI) ||
+           (rdev->family == CHIP_PITCAIRN))
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+       else if (rdev->family == CHIP_VERDE)
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
        switch (radeon_crtc->crtc_id) {
        case 0:
                WREG32(AVIVO_D1VGA_CONTROL, 0);
index e7b1ec5ae8c6207adf567f4942b422b1ae2cf140..486ccdf4aacda9b66681346e744b463bea046089 100644 (file)
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
                r600_hdmi_enable(encoder);
-               if (ASIC_IS_DCE4(rdev))
+               if (ASIC_IS_DCE6(rdev))
+                       ; /* TODO (use pointers instead of if-s?) */
+               else if (ASIC_IS_DCE4(rdev))
                        evergreen_hdmi_setmode(encoder, adjusted_mode);
                else
                        r600_hdmi_setmode(encoder, adjusted_mode);
index 58991af90502dea8b98d9aac157d182a5feca21e..7fb3d2e0434c71d52725633573bc3f17a34a010d 100644 (file)
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+               if ((rdev->family == CHIP_JUNIPER) ||
+                   (rdev->family == CHIP_CYPRESS) ||
+                   (rdev->family == CHIP_HEMLOCK) ||
+                   (rdev->family == CHIP_BARTS))
+                       WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        }
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                                 u32 num_tile_pipes,
-                                                 u32 num_backends,
-                                                 u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 cur_pipe;
-       u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       if (num_tile_pipes > EVERGREEN_MAX_PIPES)
-               num_tile_pipes = EVERGREEN_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > EVERGREEN_MAX_BACKENDS)
-               num_backends = EVERGREEN_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_CEDAR:
-       case CHIP_REDWOOD:
-       case CHIP_PALM:
-       case CHIP_SUMO:
-       case CHIP_SUMO2:
-       case CHIP_TURKS:
-       case CHIP_CAICOS:
-               force_no_swizzle = false;
-               break;
-       case CHIP_CYPRESS:
-       case CHIP_HEMLOCK:
-       case CHIP_JUNIPER:
-       case CHIP_BARTS:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_pipe_config;
-       u32 gb_addr_config = 0;
+       u32 gb_addr_config;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
-       u32 grbm_gfx_index;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
        u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        u32 sq_stack_resource_mgmt_3;
        u32 vgt_cache_invalidation;
        u32 hdp_host_path_cntl, tmp;
+       u32 disabled_rb_mask;
        int i, j, num_shader_engines, ps_thread_count;
 
        switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_JUNIPER:
                rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_REDWOOD:
                rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_CEDAR:
        default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_PALM:
                rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO:
                rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO2:
                rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_BARTS:
                rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_TURKS:
                rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_CAICOS:
                rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
-                                 & EVERGREEN_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
-                              & EVERGREEN_MAX_SIMDS_MASK);
-
-       cc_rb_backend_disable =
-               BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
-                               & EVERGREEN_MAX_BACKENDS_MASK);
-
-
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        if ((rdev->family == CHIP_PALM) ||
            (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        else
                mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       switch (rdev->config.evergreen.max_tile_pipes) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
-       gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
-       gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
-       if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
-               gb_addr_config |= ROW_SIZE(2);
-       else
-               gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
-       if (rdev->ddev->pdev->device == 0x689e) {
-               u32 efuse_straps_4;
-               u32 efuse_straps_3;
-               u8 efuse_box_bit_131_124;
-
-               WREG32(RCU_IND_INDEX, 0x204);
-               efuse_straps_4 = RREG32(RCU_IND_DATA);
-               WREG32(RCU_IND_INDEX, 0x203);
-               efuse_straps_3 = RREG32(RCU_IND_DATA);
-               efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
-               switch(efuse_box_bit_131_124) {
-               case 0x00:
-                       gb_backend_map = 0x76543210;
-                       break;
-               case 0x55:
-                       gb_backend_map = 0x77553311;
-                       break;
-               case 0x56:
-                       gb_backend_map = 0x77553300;
-                       break;
-               case 0x59:
-                       gb_backend_map = 0x77552211;
-                       break;
-               case 0x66:
-                       gb_backend_map = 0x77443300;
-                       break;
-               case 0x99:
-                       gb_backend_map = 0x66552211;
-                       break;
-               case 0x5a:
-                       gb_backend_map = 0x77552200;
-                       break;
-               case 0xaa:
-                       gb_backend_map = 0x66442200;
-                       break;
-               case 0x95:
-                       gb_backend_map = 0x66553311;
-                       break;
-               default:
-                       DRM_ERROR("bad backend map, using default\n");
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                  rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-                       break;
-               }
-       } else if (rdev->ddev->pdev->device == 0x68b9) {
-               u32 efuse_straps_3;
-               u8 efuse_box_bit_127_124;
-
-               WREG32(RCU_IND_INDEX, 0x203);
-               efuse_straps_3 = RREG32(RCU_IND_DATA);
-               efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
-               switch(efuse_box_bit_127_124) {
-               case 0x0:
-                       gb_backend_map = 0x00003210;
-                       break;
-               case 0x5:
-               case 0x6:
-               case 0x9:
-               case 0xa:
-                       gb_backend_map = 0x00003311;
-                       break;
-               default:
-                       DRM_ERROR("bad backend map, using default\n");
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                  rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-                       break;
-               }
-       } else {
-               switch (rdev->family) {
-               case CHIP_CYPRESS:
-               case CHIP_HEMLOCK:
-               case CHIP_BARTS:
-                       gb_backend_map = 0x66442200;
-                       break;
-               case CHIP_JUNIPER:
-                       gb_backend_map = 0x00002200;
-                       break;
-               default:
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                        rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-               }
-       }
-
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
                rdev->config.evergreen.tile_config |= 1 << 4;
-       else
-               rdev->config.evergreen.tile_config |=
-                       ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
-       rdev->config.evergreen.tile_config |=
-               ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       rdev->config.evergreen.tile_config |= 1 << 4;
+               else
+                       rdev->config.evergreen.tile_config |= 0 << 4;
+       }
+       rdev->config.evergreen.tile_config |= 0 << 8;
        rdev->config.evergreen.tile_config |=
                ((gb_addr_config & 0x30000000) >> 28) << 12;
 
-       rdev->config.evergreen.backend_map = gb_backend_map;
-       WREG32(GB_BACKEND_MAP, gb_backend_map);
-       WREG32(GB_ADDR_CONFIG, gb_addr_config);
-       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
-       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
 
-       num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
-       grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
-
-       for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
-               u32 rb = cc_rb_backend_disable | (0xf0 << 16);
-               u32 sp = cc_gc_shader_pipe_config;
-               u32 gfx = grbm_gfx_index | SE_INDEX(i);
+       if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+               u32 efuse_straps_4;
+               u32 efuse_straps_3;
 
-               if (i == num_shader_engines) {
-                       rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
-                       sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+               WREG32(RCU_IND_INDEX, 0x204);
+               efuse_straps_4 = RREG32(RCU_IND_DATA);
+               WREG32(RCU_IND_INDEX, 0x203);
+               efuse_straps_3 = RREG32(RCU_IND_DATA);
+               tmp = (((efuse_straps_4 & 0xf) << 4) |
+                     ((efuse_straps_3 & 0xf0000000) >> 28));
+       } else {
+               tmp = 0;
+               for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+                       u32 rb_disable_bitmap;
+
+                       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+                       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+                       rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+                       tmp <<= 4;
+                       tmp |= rb_disable_bitmap;
                }
+       }
+       /* enabled RBs are just the ones not disabled :) */
+       disabled_rb_mask = tmp;
 
-               WREG32(GRBM_GFX_INDEX, gfx);
-               WREG32(RLC_GFX_INDEX, gfx);
+       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
-               WREG32(CC_RB_BACKEND_DISABLE, rb);
-               WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
-               WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
-               WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
-        }
+       WREG32(GB_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       grbm_gfx_index |= SE_BROADCAST_WRITES;
-       WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
-       WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+       tmp = gb_addr_config & NUM_PIPES_MASK;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                                       EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       WREG32(GB_BACKEND_MAP, tmp);
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
+       if (rdev->family <= CHIP_SUMO2)
+               WREG32(SMX_SAR_CTL0, 0x00010000);
+
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
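In the evergreen_gpu_init() changes above, each shader engine's CC_RB_BACKEND_DISABLE read-back contributes one 4-bit field to disabled_rb_mask, with the highest-numbered SE landing in the highest nibble. A minimal standalone sketch of that fold, using hypothetical per-SE bitmaps instead of real register reads:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical per-SE RB-disable bitmaps (bit set = RB fused off) */
        uint32_t se_bitmap[2] = { 0x0 /* SE0 */, 0x1 /* SE1: RB0 disabled */ };
        int num_ses = 2;
        uint32_t tmp = 0;
        int i;

        /* same fold as the kernel loop: walk the SEs from last to first */
        for (i = num_ses - 1; i >= 0; i--) {
            tmp <<= 4;
            tmp |= se_bitmap[i];
        }
        printf("disabled_rb_mask = 0x%x\n", tmp); /* prints 0x10 */
        return 0;
    }

The resulting mask, together with the NUM_PIPES field of gb_addr_config, is what r6xx_remap_render_backend() consumes further down to build GB_BACKEND_MAP.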
index 4e7dd2b4843d94b0b88c778aca8fa08c7f089c61..c16554122ccd0fb482aa2f03bc93e8b47af55430 100644 (file)
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
        u32                     cb_color_view[12];
        u32                     cb_color_pitch[12];
        u32                     cb_color_slice[12];
+       u32                     cb_color_slice_idx[12];
        u32                     cb_color_attrib[12];
        u32                     cb_color_cmask_slice[8];/* unused */
        u32                     cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
                track->cb_color_info[i] = 0;
                track->cb_color_view[i] = 0xFFFFFFFF;
                track->cb_color_pitch[i] = 0;
-               track->cb_color_slice[i] = 0;
+               track->cb_color_slice[i] = 0xfffffff;
+               track->cb_color_slice_idx[i] = 0;
        }
        track->cb_target_mask = 0xFFFFFFFF;
        track->cb_shader_mask = 0xFFFFFFFF;
        track->cb_dirty = true;
 
+       track->db_depth_slice = 0xffffffff;
        track->db_depth_view = 0xFFFFC000;
        track->db_depth_size = 0xFFFFFFFF;
        track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
 {
        struct evergreen_cs_track *track = p->track;
        unsigned palign, halign, tileb, slice_pt;
+       unsigned mtile_pr, mtile_ps, mtileb;
 
        tileb = 64 * surf->bpe * surf->nsamples;
-       palign = track->group_size / (8 * surf->bpe * surf->nsamples);
-       palign = MAX(8, palign);
        slice_pt = 1;
        if (tileb > surf->tsplit) {
                slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
        /* macro tile width & height */
        palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
        halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
-       surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+       mtileb = (palign / 8) * (halign / 8) * tileb;
+       mtile_pr = surf->nbx / palign;
+       mtile_ps = (mtile_pr * surf->nby) / halign;
+       surf->layer_size = mtile_ps * mtileb * slice_pt;
        surf->base_align = (palign / 8) * (halign / 8) * tileb;
        surf->palign = palign;
        surf->halign = halign;
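The 2D (macro-tiled) path above now sizes a layer in whole macro tiles instead of nbx * nby * bpe. A standalone sketch of the arithmetic with hypothetical surface parameters (1024x1024, 32bpp, one sample, 8 pipes, 8 banks, 1x1 bank dimensions, 1:1 macro-tile aspect, tile split larger than one tile):

    #include <stdio.h>

    int main(void)
    {
        unsigned bpe = 4, nsamples = 1, nbx = 1024, nby = 1024;   /* hypothetical */
        unsigned npipes = 8, nbanks = 8, bankw = 1, bankh = 1, mtilea = 1;
        unsigned slice_pt = 1;                             /* tileb <= tile split */

        unsigned tileb  = 64 * bpe * nsamples;             /* 256 bytes per 8x8 tile */
        unsigned palign = (8 * bankw * npipes) * mtilea;   /* 64 pixels */
        unsigned halign = (8 * bankh * nbanks) / mtilea;   /* 64 rows */
        unsigned mtileb = (palign / 8) * (halign / 8) * tileb;  /* 16384 bytes */
        unsigned mtile_pr = nbx / palign;                  /* 16 macro tiles per row */
        unsigned mtile_ps = (mtile_pr * nby) / halign;     /* 256 macro tiles per slice */
        unsigned layer_size = mtile_ps * mtileb * slice_pt;

        printf("layer_size = %u bytes\n", layer_size);     /* 4194304 = 1024*1024*4 */
        return 0;
    }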
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
 
        offset += surf.layer_size * mslice;
        if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+               /* Old ddx versions are broken: they allocate the bo with
+                * w*h*bpp but program the slice with ALIGN(h, 8).  Catch
+                * this and patch the command stream.
+                */
+               if (!surf.mode) {
+                       volatile u32 *ib = p->ib.ptr;
+                       unsigned long tmp, nby, bsize, size, min = 0;
+
+                       /* find the height the ddx wants */
+                       if (surf.nby > 8) {
+                               min = surf.nby - 8;
+                       }
+                       bsize = radeon_bo_size(track->cb_color_bo[id]);
+                       tmp = track->cb_color_bo_offset[id] << 8;
+                       for (nby = surf.nby; nby > min; nby--) {
+                               size = nby * surf.nbx * surf.bpe * surf.nsamples;
+                               if ((tmp + size * mslice) <= bsize) {
+                                       break;
+                               }
+                       }
+                       if (nby > min) {
+                               surf.nby = nby;
+                               slice = ((nby * surf.nbx) / 64) - 1;
+                               if (!evergreen_surface_check(p, &surf, "cb")) {
+                                       /* check if this one works */
+                                       tmp += surf.layer_size * mslice;
+                                       if (tmp <= bsize) {
+                                               ib[track->cb_color_slice_idx[id]] = slice;
+                                               goto old_ddx_ok;
+                                       }
+                               }
+                       }
+               }
                dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
                         "offset %d, max layer %d, bo size %ld, slice %d)\n",
                         __func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
                        surf.tsplit, surf.mtilea);
                return -EINVAL;
        }
+old_ddx_ok:
 
        return 0;
 }
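As a concrete, hypothetical illustration of the workaround above: take a linear 1680x1050 32bpp color buffer whose bo was allocated as 1680*1050*4 = 7,056,000 bytes, but whose CB_COLOR*_SLICE was programmed from ALIGN(1050, 8) = 1056 rows, implying 1680*1056*4 = 7,096,320 bytes. With offset 0 and mslice 1 the size check fails, so the loop walks nby down from 1056 (min = 1048) until 1680 * nby * 4 fits in the bo, which happens at nby = 1050; the slice dword recorded in cb_color_slice_idx is then rewritten to ((1050 * 1680) / 64) - 1 = 27561 and validation continues at old_ddx_ok instead of rejecting the command stream.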
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR7_SLICE:
                tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
                track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+               track->cb_color_slice_idx[tmp] = idx;
                track->cb_dirty = true;
                break;
        case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR11_SLICE:
                tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
                track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+               track->cb_color_slice_idx[tmp] = idx;
                track->cb_dirty = true;
                break;
        case CB_COLOR0_ATTRIB:
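The divisor math above follows the register stride: CB_COLOR0_SLICE through CB_COLOR7_SLICE are 0x3c apart and CB_COLOR8_SLICE through CB_COLOR11_SLICE are 0x1c apart, so, for example, a write to CB_COLOR2_SLICE yields tmp = (reg - CB_COLOR0_SLICE) / 0x3c = 2. The new cb_color_slice_idx[] entry remembers the IB dword index of that write so the old-ddx workaround in evergreen_cs_track_validate_cb() can patch the slice value in place.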
index a51f880985f8baa8acb38082070aa34c6a65910a..65c54160028b2833c3a9576f57e7081506cd0260 100644 (file)
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
index 79130bfd1d6f058750b16a4c8f09eaf85ec992bf..b50b15c7049839c0b7a04d2dd33acff9fd19a27a 100644 (file)
 #define EVERGREEN_MAX_PIPES_MASK        0xFF
 #define EVERGREEN_MAX_LDS_NUM           0xFFFF
 
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+
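Read with the GB_ADDR_CONFIG field encodings below (the low nibble is the log2 pipe count per NUM_PIPES(), and the field at bit 12 holds num_shader_engines - 1 per NUM_SHADER_ENGINES()), the Cypress/Barts/Cayman golden value 0x02011003 describes 8 tile pipes and two shader engines, Juniper/Redwood/Turks (0x02010002) 4 pipes and one SE, and Cedar/Caicos (0x02010001) 2 pipes and one SE; the gpu_init paths below re-derive num_tile_pipes and the related config fields from these values instead of assembling gb_addr_config by hand.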
 /* Registers */
 
 #define RCU_IND_INDEX                                  0x100
@@ -54,6 +63,7 @@
 #define                BACKEND_DISABLE(x)                      ((x) << 16)
 #define GB_ADDR_CONFIG                                 0x98F8
 #define                NUM_PIPES(x)                            ((x) << 0)
+#define                NUM_PIPES_MASK                          0x0000000f
 #define                PIPE_INTERLEAVE_SIZE(x)                 ((x) << 4)
 #define                BANK_INTERLEAVE_SIZE(x)                 ((x) << 8)
 #define                NUM_SHADER_ENGINES(x)                   ((x) << 12)
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 
 #define        FUS_MC_VM_MD_L1_TLB0_CNTL                       0x265C
 #define        FUS_MC_VM_MD_L1_TLB1_CNTL                       0x2660
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                NUMBER_OF_SETS(x)                               ((x) << 1)
index b01c2dd627b0e9a345e2dc8964d415ba37deffff..b7bf18e40215c3afe45766457fa9fe802f217fae 100644 (file)
@@ -417,215 +417,17 @@ out:
 /*
  * Core functions
  */
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                              u32 num_tile_pipes,
-                                              u32 num_backends_per_asic,
-                                              u32 *backend_disable_mask_per_asic,
-                                              u32 num_shader_engines)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 num_backends_per_se;
-       u32 cur_pipe;
-       u32 swizzle_pipe[CAYMAN_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       /* force legal values */
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
-               num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-       if (num_shader_engines < 1)
-               num_shader_engines = 1;
-       if (num_shader_engines > rdev->config.cayman.max_shader_engines)
-               num_shader_engines = rdev->config.cayman.max_shader_engines;
-       if (num_backends_per_asic < num_shader_engines)
-               num_backends_per_asic = num_shader_engines;
-       if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
-               num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
-       /* make sure we have the same number of backends per se */
-       num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-       /* set up the number of backends per se */
-       num_backends_per_se = num_backends_per_asic / num_shader_engines;
-       if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
-               num_backends_per_se = rdev->config.cayman.max_backends_per_se;
-               num_backends_per_asic = num_backends_per_se * num_shader_engines;
-       }
-
-       /* create enable mask and count for enabled backends */
-       for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-               if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends_per_asic)
-                       break;
-       }
-
-       /* force the backends mask to match the current number of backends */
-       if (enabled_backends_count != num_backends_per_asic) {
-               u32 this_backend_enabled;
-               u32 shader_engine;
-               u32 backend_per_se;
-
-               enabled_backends_mask = 0;
-               enabled_backends_count = 0;
-               *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
-               for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-                       /* calc the current se */
-                       shader_engine = i / rdev->config.cayman.max_backends_per_se;
-                       /* calc the backend per se */
-                       backend_per_se = i % rdev->config.cayman.max_backends_per_se;
-                       /* default to not enabled */
-                       this_backend_enabled = 0;
-                       if ((shader_engine < num_shader_engines) &&
-                           (backend_per_se < num_backends_per_se))
-                               this_backend_enabled = 1;
-                       if (this_backend_enabled) {
-                               enabled_backends_mask |= (1 << i);
-                               *backend_disable_mask_per_asic &= ~(1 << i);
-                               ++enabled_backends_count;
-                       }
-               }
-       }
-
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_CAYMAN:
-       case CHIP_ARUBA:
-               force_no_swizzle = true;
-               break;
-       default:
-               force_no_swizzle = false;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
-                                           u32 disable_mask_per_se,
-                                           u32 max_disable_mask_per_se,
-                                           u32 num_shader_engines)
-{
-       u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-       u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-       if (num_shader_engines == 1)
-               return disable_mask_per_asic;
-       else if (num_shader_engines == 2)
-               return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-       else
-               return 0xffffffff;
-}
-
 static void cayman_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_pipe_config;
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
        u32 cgts_tcc_disable;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
-       u32 gc_user_shader_pipe_config;
-       u32 gc_user_rb_backend_disable;
-       u32 cgts_user_tcc_disable;
        u32 cgts_sm_ctrl_reg;
        u32 hdp_host_path_cntl;
        u32 tmp;
+       u32 disabled_rb_mask;
        int i, j;
 
        switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.sc_prim_fifo_size = 0x100;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARUBA:
        default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 2;
                if ((rdev->pdev->device == 0x9900) ||
-                   (rdev->pdev->device == 0x9901)) {
+                   (rdev->pdev->device == 0x9901) ||
+                   (rdev->pdev->device == 0x9905) ||
+                   (rdev->pdev->device == 0x9906) ||
+                   (rdev->pdev->device == 0x9907) ||
+                   (rdev->pdev->device == 0x9908) ||
+                   (rdev->pdev->device == 0x9909) ||
+                   (rdev->pdev->device == 0x9910) ||
+                   (rdev->pdev->device == 0x9917)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
-                          (rdev->pdev->device == 0x9904)) {
+                          (rdev->pdev->device == 0x9904) ||
+                          (rdev->pdev->device == 0x990A) ||
+                          (rdev->pdev->device == 0x9913) ||
+                          (rdev->pdev->device == 0x9918)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
-               } else if ((rdev->pdev->device == 0x9990) ||
-                          (rdev->pdev->device == 0x9991)) {
+               } else if ((rdev->pdev->device == 0x9919) ||
+                          (rdev->pdev->device == 0x9990) ||
+                          (rdev->pdev->device == 0x9991) ||
+                          (rdev->pdev->device == 0x9994) ||
+                          (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
                } else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.sc_prim_fifo_size = 0x40;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-       cgts_tcc_disable = 0xffff0000;
-       for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
-               cgts_tcc_disable &= ~(1 << (16 + i));
-       gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-       gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
-       cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-       rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
-       tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
-       rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
-       rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-       tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
-       rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
-       tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
-       tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.cayman.backend_disable_mask_per_asic =
-               cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
-                                                rdev->config.cayman.num_shader_engines);
-       rdev->config.cayman.backend_map =
-               cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-                                                   rdev->config.cayman.num_backends_per_se *
-                                                   rdev->config.cayman.num_shader_engines,
-                                                   &rdev->config.cayman.backend_disable_mask_per_asic,
-                                                   rdev->config.cayman.num_shader_engines);
-       tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-       rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
-       tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
-       rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
-       if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
-               rdev->config.cayman.mem_max_burst_length_bytes = 512;
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
        if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        rdev->config.cayman.num_gpus = 1;
        rdev->config.cayman.multi_gpu_tile_size = 64;
 
-       //gb_addr_config = 0x02011003
-#if 0
-       gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
-       gb_addr_config = 0;
-       switch (rdev->config.cayman.num_tile_pipes) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
-       tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-       switch (rdev->config.cayman.num_gpus) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_GPUS(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_GPUS(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_GPUS(2);
-               break;
-       }
-       switch (rdev->config.cayman.multi_gpu_tile_size) {
-       case 16:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-               break;
-       case 32:
-       default:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-               break;
-       case 64:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-               break;
-       case 128:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-               break;
-       }
-       switch (rdev->config.cayman.mem_row_size_in_kb) {
-       case 1:
-       default:
-               gb_addr_config |= ROW_SIZE(0);
-               break;
-       case 2:
-               gb_addr_config |= ROW_SIZE(1);
-               break;
-       case 4:
-               gb_addr_config |= ROW_SIZE(2);
-               break;
-       }
-#endif
-
        tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
        rdev->config.cayman.num_tile_pipes = (1 << tmp);
        tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
-       //gb_backend_map = 0x76541032;
-#if 0
-       gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
-       gb_backend_map =
-               cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-                                                   rdev->config.cayman.num_backends_per_se *
-                                                   rdev->config.cayman.num_shader_engines,
-                                                   &rdev->config.cayman.backend_disable_mask_per_asic,
-                                                   rdev->config.cayman.num_shader_engines);
-#endif
+
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -865,34 +572,50 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
-               rdev->config.evergreen.tile_config |= 1 << 4;
-       else
-               rdev->config.cayman.tile_config |=
-                       ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+               rdev->config.cayman.tile_config |= 1 << 4;
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       rdev->config.cayman.tile_config |= 1 << 4;
+               else
+                       rdev->config.cayman.tile_config |= 0 << 4;
+       }
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-       rdev->config.cayman.backend_map = gb_backend_map;
-       WREG32(GB_BACKEND_MAP, gb_backend_map);
+       tmp = 0;
+       for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+               u32 rb_disable_bitmap;
+
+               WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+               WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+               rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+               tmp <<= 4;
+               tmp |= rb_disable_bitmap;
+       }
+       /* enabled RBs are just the ones not disabled :) */
+       disabled_rb_mask = tmp;
+
+       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       /* primary versions */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+       tmp = gb_addr_config & NUM_PIPES_MASK;
+       tmp = r6xx_remap_render_backend(rdev, tmp,
+                                       rdev->config.cayman.max_backends_per_se *
+                                       rdev->config.cayman.max_shader_engines,
+                                       CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+       WREG32(GB_BACKEND_MAP, tmp);
 
+       cgts_tcc_disable = 0xffff0000;
+       for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+               cgts_tcc_disable &= ~(1 << (16 + i));
        WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
-       /* user versions */
-       WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
        WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
@@ -1580,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1606,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        radeon_ib_pool_suspend(rdev);
        radeon_vm_manager_suspend(rdev);
index 2aa7046ada56af451726cccbb78a59b1a7bfaaa0..a0b98066e20796a180f429fce4ef2283e285667e 100644 (file)
@@ -41,6 +41,9 @@
 #define CAYMAN_MAX_TCC               16
 #define CAYMAN_MAX_TCC_MASK          0xFF
 
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
 #define DMIF_ADDR_CONFIG                               0xBD4
 #define        SRBM_GFX_CNTL                                   0x0E44
 #define                RINGID(x)                                       (((x) & 0x3) << 0)
 #define        CGTS_SYS_TCC_DISABLE                            0x3F90
 #define        CGTS_USER_SYS_TCC_DISABLE                       0x3F94
 
+#define RLC_GFX_INDEX                                  0x3FC4
+
 #define        CONFIG_MEMSIZE                                  0x5428
 
 #define HDP_MEM_COHERENCY_FLUSH_CNTL                   0x5480
 #define                SOFT_RESET_VGT                                  (1 << 14)
 #define                SOFT_RESET_IA                                   (1 << 15)
 
+#define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
+
 #define        SCRATCH_REG0                                    0x8500
 #define        SCRATCH_REG1                                    0x8504
 #define        SCRATCH_REG2                                    0x8508
index f388a1d73b635f6e385e9defbc34d0b022538c4d..bff62729381215996778ee493515bb9b6a6469f9 100644 (file)
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
        return r600_gpu_soft_reset(rdev);
 }
 
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
-                                            u32 num_backends,
-                                            u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask;
-       u32 enabled_backends_count;
-       u32 cur_pipe;
-       u32 swizzle_pipe[R6XX_MAX_PIPES];
-       u32 cur_backend;
-       u32 i;
-
-       if (num_tile_pipes > R6XX_MAX_PIPES)
-               num_tile_pipes = R6XX_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > R6XX_MAX_BACKENDS)
-               num_backends = R6XX_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       enabled_backends_mask = 0;
-       enabled_backends_count = 0;
-       for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
-       switch (num_tile_pipes) {
-       case 1:
-               swizzle_pipe[0] = 0;
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 3:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               break;
-       case 4:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               swizzle_pipe[3] = 3;
-               break;
-       case 5:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               swizzle_pipe[3] = 3;
-               swizzle_pipe[4] = 4;
-               break;
-       case 6:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 5;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               break;
-       case 7:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 6;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               swizzle_pipe[6] = 5;
-               break;
-       case 8:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 6;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               swizzle_pipe[6] = 5;
-               swizzle_pipe[7] = 7;
-               break;
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+                             u32 tiling_pipe_num,
+                             u32 max_rb_num,
+                             u32 total_max_rb_num,
+                             u32 disabled_rb_mask)
+{
+       u32 rendering_pipe_num, rb_num_width, req_rb_num;
+       u32 pipe_rb_ratio, pipe_rb_remain;
+       u32 data = 0, mask = 1 << (max_rb_num - 1);
+       unsigned i, j;
+
+       /* mask out the RBs that don't exist on that asic */
+       disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+
+       rendering_pipe_num = 1 << tiling_pipe_num;
+       req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+       BUG_ON(rendering_pipe_num < req_rb_num);
+
+       pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+       pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+       if (rdev->family <= CHIP_RV740) {
+               /* r6xx/r7xx */
+               rb_num_width = 2;
+       } else {
+               /* eg+ */
+               rb_num_width = 4;
        }
 
-       cur_backend = 0;
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
-               backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-               cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+       for (i = 0; i < max_rb_num; i++) {
+               if (!(mask & disabled_rb_mask)) {
+                       for (j = 0; j < pipe_rb_ratio; j++) {
+                               data <<= rb_num_width;
+                               data |= max_rb_num - i - 1;
+                       }
+                       if (pipe_rb_remain) {
+                               data <<= rb_num_width;
+                               data |= max_rb_num - i - 1;
+                               pipe_rb_remain--;
+                       }
+               }
+               mask >>= 1;
        }
 
-       return backend_map;
+       return data;
 }
 
 int r600_count_pipe_bits(uint32_t val)
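r6xx_remap_render_backend() walks the render backends from the highest index down and emits one field per rendering pipe (2 bits wide on r6xx/r7xx, 4 bits on evergreen and newer), skipping any RB in disabled_rb_mask. A standalone sketch of the evergreen-width path with assumed inputs (8 pipes, 8 RBs, nothing fused off) that reproduces the fully populated map 0x76543210 seen in the old per-efuse tables:

    #include <stdio.h>
    #include <stdint.h>

    /* simplified copy of the remap walk, 4-bit (evergreen+) fields only */
    static uint32_t remap_rb(uint32_t tiling_pipe_num, uint32_t max_rb_num,
                             uint32_t total_max_rb_num, uint32_t disabled_rb_mask)
    {
        uint32_t data = 0, mask = 1u << (max_rb_num - 1);
        uint32_t rendering_pipe_num, req_rb_num, pipe_rb_ratio, pipe_rb_remain;
        uint32_t i, j;

        /* mask out the RBs that don't exist on this (hypothetical) asic */
        disabled_rb_mask |= (0xffu << max_rb_num) & 0xff;
        rendering_pipe_num = 1u << tiling_pipe_num;
        /* __builtin_popcount stands in for r600_count_pipe_bits() */
        req_rb_num = total_max_rb_num - (uint32_t)__builtin_popcount(disabled_rb_mask);
        pipe_rb_ratio  = rendering_pipe_num / req_rb_num;
        pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

        for (i = 0; i < max_rb_num; i++) {
            if (!(mask & disabled_rb_mask)) {
                for (j = 0; j < pipe_rb_ratio; j++)
                    data = (data << 4) | (max_rb_num - i - 1);
                if (pipe_rb_remain) {
                    data = (data << 4) | (max_rb_num - i - 1);
                    pipe_rb_remain--;
                }
            }
            mask >>= 1;
        }
        return data;
    }

    int main(void)
    {
        /* NUM_PIPES field 3 (8 pipes), 8 RBs per asic, none disabled */
        printf("GB_BACKEND_MAP = 0x%08x\n", remap_rb(3, 8, 8, 0)); /* 0x76543210 */
        return 0;
    }

On the r6xx/r7xx side the same walk uses 2-bit fields; with 4 pipes and 4 enabled backends it would produce 0xE4, which then lands in bits 23:16 of GB_TILING_CONFIG via the tiling_config |= tmp << 16 in r600_gpu_init() below.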
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
 {
        u32 tiling_config;
        u32 ramcfg;
-       u32 backend_map;
        u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
        u32 sq_thread_resource_mgmt = 0;
        u32 sq_stack_resource_mgmt_1 = 0;
        u32 sq_stack_resource_mgmt_2 = 0;
+       u32 disabled_rb_mask;
 
-       /* FIXME: implement */
+       rdev->config.r600.tiling_group_size = 256;
        switch (rdev->family) {
        case CHIP_R600:
                rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-               rdev->config.r600.tiling_group_size = 512;
-       else
-               rdev->config.r600.tiling_group_size = 256;
+
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
        tiling_config |= BANK_SWAPS(1);
 
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       cc_rb_backend_disable |=
-               BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-
-       backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-                                                       (R6XX_MAX_BACKENDS -
-                                                        r600_count_pipe_bits((cc_rb_backend_disable &
-                                                                              R6XX_MAX_BACKENDS_MASK) >> 16)),
-                                                       (cc_rb_backend_disable >> 16));
+       tmp = R6XX_MAX_BACKENDS -
+               r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+       if (tmp < rdev->config.r600.max_backends) {
+               rdev->config.r600.max_backends = tmp;
+       }
+
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+       tmp = R6XX_MAX_PIPES -
+               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+       if (tmp < rdev->config.r600.max_pipes) {
+               rdev->config.r600.max_pipes = tmp;
+       }
+       tmp = R6XX_MAX_SIMDS -
+               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+       if (tmp < rdev->config.r600.max_simds) {
+               rdev->config.r600.max_simds = tmp;
+       }
+
+       disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+       tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+                                       R6XX_MAX_BACKENDS, disabled_rb_mask);
+       tiling_config |= tmp << 16;
+       rdev->config.r600.backend_map = tmp;
+
        rdev->config.r600.tile_config = tiling_config;
-       rdev->config.r600.backend_map = backend_map;
-       tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
-       /* Setup pipes */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1900,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                               NUM_CLIP_SEQ(3)));
        WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+       WREG32(VC_ENHANCE, 0);
 }
 
 
@@ -2487,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -2523,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               DRM_ERROR("radeon: audio resume failed\n");
-               return r;
-       }
-
        return r;
 }
 
@@ -2638,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r)
-               return r; /* TODO error handling */
        return 0;
 }
 
index 7c4fa77f018f7380b0481f385eb8c123b807701b..79b55916cf90c0ddfe397c007a867715cf0fea33 100644 (file)
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
        int base_rate = 48000;
 
        switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
                WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
                WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
-               /* Some magic trigger or src sel? */
-               WREG32_P(0x5ac, 0x01, ~0x77);
+               /* Select DTO source */
+               WREG32(0x5ac, radeon_crtc->crtc_id);
        } else {
                switch (dig->dig_encoder) {
                case 0:
index 0133f5f09bd6c71cc95d2472beaacef51c3ffa2a..ca87f7afaf2374d02117ec91e479c1e5a385b8d2 100644 (file)
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                break;
+       case PACKET3_STRMOUT_BASE_UPDATE:
+               if (p->family < CHIP_RV770) {
+                       DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+                       return -EINVAL;
+               }
+               if (pkt->count != 1) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+                       return -EINVAL;
+               }
+               if (idx_value > 3) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+                       return -EINVAL;
+               }
+               {
+                       u64 offset;
+
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+                               return -EINVAL;
+                       }
+
+                       if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+                               return -EINVAL;
+                       }
+
+                       offset = radeon_get_ib_value(p, idx+1) << 8;
+                       if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+                                         offset, track->vgt_strmout_bo_offset[idx_value]);
+                               return -EINVAL;
+                       }
+
+                       if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+                                         offset + 4, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               }
+               break;
        case PACKET3_SURFACE_BASE_UPDATE:
                if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
                        DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
index 226379e00ac1de31bc7dcdb1781ff64ee4457b8c..82a0a4c919c027ea77308917a0309aa9a818e4d8 100644 (file)
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
                WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
                       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
                       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
-                      HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
                       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
                       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
        }
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        uint32_t offset;
        u32 hdmi;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Called for ATOM_ENCODER_MODE_HDMI only */
index 15bd3b216243c2207777d4ab20cb2e0fe091a232..025fd5b6c08c8c0d68bc93c990586cf2cc2b4689 100644 (file)
 #define                BACKEND_MAP(x)                                  ((x) << 16)
 
 #define GB_TILING_CONFIG                               0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                TC_L2_SIZE(x)                                   ((x)<<5)
 #define                L2_DISABLE_LATE_HIT                             (1<<9)
 
+#define        VC_ENHANCE                                      0x9714
 
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define        PACKET3_SET_CTL_CONST                           0x6F
 #define                PACKET3_SET_CTL_CONST_OFFSET                    0x0003cff0
 #define                PACKET3_SET_CTL_CONST_END                       0x0003e200
+#define        PACKET3_STRMOUT_BASE_UPDATE                     0x72 /* r7xx */
 #define        PACKET3_SURFACE_BASE_UPDATE                     0x73
 
 
index 1dc3a4aba0205f5afccaea25d71a785868e3506b..fefcca55c1eb8bb76c7611cc0f38e5f04fe47765 100644 (file)
@@ -346,6 +346,9 @@ struct radeon_bo {
        /* Constant after initialization */
        struct radeon_device            *rdev;
        struct drm_gem_object           gem_base;
+
+       struct ttm_bo_kmap_obj dma_buf_vmap;
+       int vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
@@ -848,7 +851,6 @@ struct radeon_cs_parser {
        s32                     priority;
 };
 
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
@@ -1372,9 +1374,9 @@ struct cayman_asic {
 
 struct si_asic {
        unsigned max_shader_engines;
-       unsigned max_pipes_per_simd;
        unsigned max_tile_pipes;
-       unsigned max_simds_per_se;
+       unsigned max_cu_per_sh;
+       unsigned max_sh_per_se;
        unsigned max_backends_per_se;
        unsigned max_texture_channel_caches;
        unsigned max_gprs;
@@ -1385,7 +1387,6 @@ struct si_asic {
        unsigned sc_hiz_tile_fifo_size;
        unsigned sc_earlyz_tile_fifo_size;
 
-       unsigned num_shader_engines;
        unsigned num_tile_pipes;
        unsigned num_backends_per_se;
        unsigned backend_disable_mask_per_asic;
@@ -1846,6 +1847,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+                                    u32 tiling_pipe_num,
+                                    u32 max_rb_num,
+                                    u32 total_max_rb_num,
+                                    u32 enabled_rb_mask);
 
 /*
  * evergreen functions used by radeon_encoder.c
index f6e69b8c06c6110e3aecf80b41eb3b1ca6eb6968..b1e3820df36397e9c2ad7a7798be5412c0853df2 100644 (file)
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
         */
        if ((dev->pdev->device == 0x9498) &&
            (dev->pdev->subsystem_vendor == 0x1682) &&
-           (dev->pdev->subsystem_device == 0x2452)) {
+           (dev->pdev->subsystem_device == 0x2452) &&
+           (i2c_bus->valid == false) &&
+           !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
                struct radeon_device *rdev = dev->dev_private;
                *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
        }
index c7d64a7390339e7b6fe29e27de06265843a865ff..142f89462aa4ddab99030f7b285a8f6b2e883925 100644 (file)
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
                                           sync_to_ring, p->ring);
 }
 
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
        struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                }
        }
 
-       if ((p->cs_flags & RADEON_CS_USE_VM) &&
-           !p->rdev->vm_manager.enabled) {
-               DRM_ERROR("VM not active on asic!\n");
-               return -EINVAL;
-       }
-
-       /* we only support VM on SI+ */
-       if ((p->rdev->family >= CHIP_TAHITI) &&
-           ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-               DRM_ERROR("VM required on SI+!\n");
-               return -EINVAL;
-       }
+       /* these are KMS only */
+       if (p->rdev) {
+               if ((p->cs_flags & RADEON_CS_USE_VM) &&
+                   !p->rdev->vm_manager.enabled) {
+                       DRM_ERROR("VM not active on asic!\n");
+                       return -EINVAL;
+               }
 
-       if (radeon_cs_get_ring(p, ring, priority))
-               return -EINVAL;
+               /* we only support VM on SI+ */
+               if ((p->rdev->family >= CHIP_TAHITI) &&
+                   ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+                       DRM_ERROR("VM required on SI+!\n");
+                       return -EINVAL;
+               }
 
+               if (radeon_cs_get_ring(p, ring, priority))
+                       return -EINVAL;
+       }
 
        /* deal with non-vm */
        if ((p->chunk_ib_idx != -1) &&
@@ -580,7 +583,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
        return 0;
 }
 
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 {
        int new_page;
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +626,28 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
        return new_page;
 }
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       u32 pg_idx, pg_offset;
+       u32 idx_value = 0;
+       int new_page;
+
+       pg_idx = (idx * 4) / PAGE_SIZE;
+       pg_offset = (idx * 4) % PAGE_SIZE;
+
+       if (ibc->kpage_idx[0] == pg_idx)
+               return ibc->kpage[0][pg_offset/4];
+       if (ibc->kpage_idx[1] == pg_idx)
+               return ibc->kpage[1][pg_offset/4];
+
+       new_page = radeon_cs_update_pages(p, pg_idx);
+       if (new_page < 0) {
+               p->parser_error = new_page;
+               return 0;
+       }
+
+       idx_value = ibc->kpage[new_page][pg_offset/4];
+       return idx_value;
+}
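
The hunk above moves radeon_get_ib_value() into radeon_cs.c: it fetches one 32-bit dword of the indirect buffer through a two-entry page cache (kpage[0]/kpage[1]) and only calls the now-static radeon_cs_update_pages() on a miss, using the slot number the refill returns. The standalone userspace sketch below illustrates just that two-slot lookup pattern; it is not kernel code, and every name in it (ib_chunk_sketch, chunk_update_pages, chunk_get_value, SKETCH_PAGE_SIZE) is hypothetical.

/* Userspace sketch of a two-slot page cache lookup, modeled loosely on
 * radeon_get_ib_value() above.  All names and sizes are hypothetical.
 * build: cc two_slot_cache.c */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u

struct ib_chunk_sketch {
	const uint32_t *backing;                   /* full chunk data */
	uint32_t kpage[2][SKETCH_PAGE_SIZE / 4];   /* two cached pages */
	int kpage_idx[2];                          /* page held by each slot, -1 if empty */
	int last_copied;                           /* slot to refill next (round robin) */
};

/* Refill one cache slot with the page containing pg_idx; returns the slot. */
static int chunk_update_pages(struct ib_chunk_sketch *c, uint32_t pg_idx)
{
	int slot = c->last_copied;

	memcpy(c->kpage[slot], c->backing + pg_idx * (SKETCH_PAGE_SIZE / 4),
	       SKETCH_PAGE_SIZE);
	c->kpage_idx[slot] = (int)pg_idx;
	c->last_copied = (slot + 1) % 2;
	return slot;
}

/* Return dword 'idx' of the chunk, going through the two-slot cache. */
static uint32_t chunk_get_value(struct ib_chunk_sketch *c, uint32_t idx)
{
	uint32_t pg_idx = (idx * 4) / SKETCH_PAGE_SIZE;
	uint32_t pg_offset = (idx * 4) % SKETCH_PAGE_SIZE;
	int slot;

	if (c->kpage_idx[0] == (int)pg_idx)
		return c->kpage[0][pg_offset / 4];
	if (c->kpage_idx[1] == (int)pg_idx)
		return c->kpage[1][pg_offset / 4];

	slot = chunk_update_pages(c, pg_idx);
	return c->kpage[slot][pg_offset / 4];
}

int main(void)
{
	static uint32_t data[2 * SKETCH_PAGE_SIZE / 4];
	struct ib_chunk_sketch c = {
		.backing = data,
		.kpage_idx = { -1, -1 },
		.last_copied = 0,
	};

	data[0] = 0x1234;
	data[SKETCH_PAGE_SIZE / 4] = 0x5678;       /* first dword of page 1 */
	printf("0x%" PRIx32 " 0x%" PRIx32 "\n",
	       chunk_get_value(&c, 0),
	       chunk_get_value(&c, SKETCH_PAGE_SIZE / 4));
	return 0;
}
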
index f0bb2b543b13d2f5ab437c00efdc1a0ea52c663a..2c4d53fd20c5c1e010e0d297d62c591e6ff618b9 100644 (file)
  *   2.13.0 - virtual memory support, streamout
  *   2.14.0 - add evergreen tiling information
  *   2.15.0 - add max_pipes query
+ *   2.16.0 - fix evergreen 2D tiled surface calculation
+ *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       15
+#define KMS_DRIVER_MINOR       17
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 79db56e6c2ac2ca66d48bc6519e6406ee1a477ae..84b648a7ddd8cf697fc07fd00a1e2eed39ff46bd 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,12 +477,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 
        mutex_lock(&vm->mutex);
        if (last_pfn > vm->last_pfn) {
-               /* grow va space 32M by 32M */
-               unsigned align = ((32 << 20) >> 12) - 1;
+               /* release mutex and lock in right order */
+               mutex_unlock(&vm->mutex);
                radeon_mutex_lock(&rdev->cs_mutex);
-               radeon_vm_unbind_locked(rdev, vm);
+               mutex_lock(&vm->mutex);
+               /* and check again */
+               if (last_pfn > vm->last_pfn) {
+                       /* grow va space 32M by 32M */
+                       unsigned align = ((32 << 20) >> 12) - 1;
+                       radeon_vm_unbind_locked(rdev, vm);
+                       vm->last_pfn = (last_pfn + align) & ~align;
+               }
                radeon_mutex_unlock(&rdev->cs_mutex);
-               vm->last_pfn = (last_pfn + align) & ~align;
        }
        head = &vm->va;
        last_offset = 0;
@@ -595,8 +602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
        if (bo_va == NULL)
                return 0;
 
-       mutex_lock(&vm->mutex);
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
        radeon_mutex_unlock(&rdev->cs_mutex);
        list_del(&bo_va->vm_list);
@@ -627,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow them as needed.  Once we switch
+        * to two level pts we can unify this again.
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
@@ -641,9 +656,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
        struct radeon_bo_va *bo_va, *tmp;
        int r;
 
-       mutex_lock(&vm->mutex);
-
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_unbind_locked(rdev, vm);
        radeon_mutex_unlock(&rdev->cs_mutex);
 
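
The radeon_gart.c hunks above make the lock ordering consistent: cs_mutex is now always taken before vm->mutex, and radeon_vm_bo_add() drops vm->mutex, takes cs_mutex, retakes vm->mutex and then re-checks last_pfn before growing the VA space, since another thread may already have grown it during the unlocked window. A minimal pthreads sketch of that drop/reacquire/re-check pattern follows; it is illustrative userspace code under invented names, not the driver's locking.

/* Sketch of "drop the inner lock, take both locks in the agreed order,
 * then re-check the condition", as in the radeon_vm_bo_add() change above.
 * Userspace pthreads, hypothetical names.  build: cc -pthread grow_sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays cs_mutex */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays vm->mutex */
static unsigned long last_pfn;                            /* protected by inner */

static void grow_if_needed(unsigned long new_last_pfn)
{
	pthread_mutex_lock(&inner);
	if (new_last_pfn > last_pfn) {
		/* release inner, then take outer before inner (the agreed order) */
		pthread_mutex_unlock(&inner);
		pthread_mutex_lock(&outer);
		pthread_mutex_lock(&inner);
		/* someone may have grown it while we were unlocked: check again */
		if (new_last_pfn > last_pfn)
			last_pfn = new_last_pfn;   /* the "grow" step */
		pthread_mutex_unlock(&outer);
	}
	pthread_mutex_unlock(&inner);
}

int main(void)
{
	grow_if_needed(1024);
	printf("last_pfn = %lu\n", last_pfn);
	return 0;
}
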
index f28bd4b7ef980937c88eb54c30b5534adc5abf3b..21ec9f5653cedc94e729e521e8cf62e4b14c2884 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
index f1016a5820d1f0b0cc3e89195f4576b836e9a269..5c58d7d90cb2a356710d9f4a2a17fe450a1aee8d 100644 (file)
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                break;
        case RADEON_INFO_MAX_PIPES:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_pipes_per_simd;
+                       value = rdev->config.si.max_cu_per_sh;
                else if (rdev->family >= CHIP_CAYMAN)
                        value = rdev->config.cayman.max_pipes_per_simd;
                else if (rdev->family >= CHIP_CEDAR)
index 08825548ee69c487909e16019ddbfca0e7cbcb6a..5b37e283ec38575a86d59a695b1f453251f9ae43 100644 (file)
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
                int i;
 
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-                       not_processed += radeon_fence_count_emitted(rdev, i);
-                       if (not_processed >= 3)
-                               break;
+                       struct radeon_ring *ring = &rdev->ring[i];
+
+                       if (ring->ready) {
+                               not_processed += radeon_fence_count_emitted(rdev, i);
+                               if (not_processed >= 3)
+                                       break;
+                       }
                }
 
                if (not_processed >= 3) { /* should upclock */
index b8f835d8ecb4127a2c4b8771e1b74859f0561c5d..6bef46ace8315d7706998577fa507769b12c17a3 100644 (file)
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
 
 }
 
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+       struct radeon_bo *bo = dma_buf->priv;
+       struct drm_device *dev = bo->rdev->ddev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       if (bo->vmapping_count) {
+               bo->vmapping_count++;
+               goto out_unlock;
+       }
+
+       ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+                         &bo->dma_buf_vmap);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ERR_PTR(ret);
+       }
+       bo->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct radeon_bo *bo = dma_buf->priv;
+       struct drm_device *dev = bo->rdev->ddev;
+
+       mutex_lock(&dev->struct_mutex);
+       bo->vmapping_count--;
+       if (bo->vmapping_count == 0) {
+               ttm_bo_kunmap(&bo->dma_buf_vmap);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops =  {
        .map_dma_buf = radeon_gem_map_dma_buf,
        .unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops =  {
        .kmap_atomic = radeon_gem_kmap_atomic,
        .kunmap = radeon_gem_kunmap,
        .kunmap_atomic = radeon_gem_kunmap_atomic,
+       .mmap = radeon_gem_prime_mmap,
+       .vmap = radeon_gem_prime_vmap,
+       .vunmap = radeon_gem_prime_vunmap,
 };
 
 static int radeon_prime_create(struct drm_device *dev,
@@ -125,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
        int ret = 0;
 
+       ret = radeon_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               return ERR_PTR(ret);
+
        /* pin buffer into GTT */
        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-       if (ret)
+       if (ret) {
+               radeon_bo_unreserve(bo);
                return ERR_PTR(ret);
-
+       }
+       radeon_bo_unreserve(bo);
        return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
 }
 
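
The radeon_prime.c changes above add a refcounted kernel mapping for exported buffers: radeon_gem_prime_vmap() performs the actual ttm_bo_kmap() only when vmapping_count goes from 0 to 1, later callers just bump the count and reuse dma_buf_vmap.virtual, and radeon_gem_prime_vunmap() tears the mapping down once the count drops back to 0, all under dev->struct_mutex. The userspace sketch below shows the same first-map/last-unmap idiom with malloc/free standing in for ttm_bo_kmap()/ttm_bo_kunmap(); every name in it is hypothetical.

/* Sketch of a refcounted map/unmap pair, after radeon_gem_prime_vmap()
 * and radeon_gem_prime_vunmap() above.  Userspace only; malloc/free stand
 * in for the real mapping calls.  build: cc -pthread vmap_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf_sketch {
	pthread_mutex_t lock;
	void *vaddr;            /* valid while vmapping_count > 0 */
	int vmapping_count;
	size_t size;
};

static void *buf_vmap(struct buf_sketch *bo)
{
	void *ret;

	pthread_mutex_lock(&bo->lock);
	if (bo->vmapping_count) {
		bo->vmapping_count++;    /* already mapped: just take a reference */
		ret = bo->vaddr;
		pthread_mutex_unlock(&bo->lock);
		return ret;
	}
	bo->vaddr = malloc(bo->size);    /* stands in for ttm_bo_kmap() */
	if (!bo->vaddr) {
		pthread_mutex_unlock(&bo->lock);
		return NULL;
	}
	bo->vmapping_count = 1;
	ret = bo->vaddr;
	pthread_mutex_unlock(&bo->lock);
	return ret;
}

static void buf_vunmap(struct buf_sketch *bo)
{
	pthread_mutex_lock(&bo->lock);
	if (--bo->vmapping_count == 0) {
		free(bo->vaddr);         /* stands in for ttm_bo_kunmap() */
		bo->vaddr = NULL;
	}
	pthread_mutex_unlock(&bo->lock);
}

int main(void)
{
	struct buf_sketch bo = { PTHREAD_MUTEX_INITIALIZER, NULL, 0, 4096 };
	void *a = buf_vmap(&bo);
	void *b = buf_vmap(&bo);         /* second user shares the mapping */

	printf("same mapping: %d\n", a == b);
	buf_vunmap(&bo);
	buf_vunmap(&bo);                 /* last unmap actually releases it */
	return 0;
}
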
index 493a7be753065afb8b2aba0d080efd75927c4569..983658c91358939e0123a4545519d2d6c3b24f05 100644 (file)
  */
 int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-       u32 pg_idx, pg_offset;
-       u32 idx_value = 0;
-       int new_page;
-
-       pg_idx = (idx * 4) / PAGE_SIZE;
-       pg_offset = (idx * 4) % PAGE_SIZE;
-
-       if (ibc->kpage_idx[0] == pg_idx)
-               return ibc->kpage[0][pg_offset/4];
-       if (ibc->kpage_idx[1] == pg_idx)
-               return ibc->kpage[1][pg_offset/4];
-
-       new_page = radeon_cs_update_pages(p, pg_idx);
-       if (new_page < 0) {
-               p->parser_error = new_page;
-               return 0;
-       }
-
-       idx_value = ibc->kpage[new_page][pg_offset/4];
-       return idx_value;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib *ib, unsigned size)
 {
index 25f9eef12c42a8caf6d05a0de3b37d5d6d6352a7..e95c5e61d4e2211a86859a0b0143cdc3139f8883 100644 (file)
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
index 3277ddecfe9fbd7755ba3c6bde888ebb7df68e36..159b6a43fda06598c3f76ca28c12b337d53105e6 100644 (file)
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
index c2f473bc13b85bf189b44d823d0ef193b94fd3c2..b4f51c569c369962c2afebdee3ded9b1f71679c5 100644 (file)
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       if (rdev->family == CHIP_RV740)
+               WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                            u32 num_tile_pipes,
-                                            u32 num_backends,
-                                            u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask;
-       u32 enabled_backends_count;
-       u32 cur_pipe;
-       u32 swizzle_pipe[R7XX_MAX_PIPES];
-       u32 cur_backend;
-       u32 i;
-       bool force_no_swizzle;
-
-       if (num_tile_pipes > R7XX_MAX_PIPES)
-               num_tile_pipes = R7XX_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > R7XX_MAX_BACKENDS)
-               num_backends = R7XX_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       enabled_backends_mask = 0;
-       enabled_backends_count = 0;
-       for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       switch (rdev->family) {
-       case CHIP_RV770:
-       case CHIP_RV730:
-               force_no_swizzle = false;
-               break;
-       case CHIP_RV710:
-       case CHIP_RV740:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
-       switch (num_tile_pipes) {
-       case 1:
-               swizzle_pipe[0] = 0;
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 3:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-               }
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 3;
-                       swizzle_pipe[3] = 1;
-               }
-               break;
-       case 5:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 5;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-               }
-               break;
-       case 7:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-                       swizzle_pipe[6] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-                       swizzle_pipe[6] = 7;
-                       swizzle_pipe[7] = 5;
-               }
-               break;
-       }
-
-       cur_backend = 0;
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
-               backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-               cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
        int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        u32 sq_thread_resource_mgmt;
        u32 hdp_host_path_cntl;
        u32 sq_dyn_gpr_size_simd_ab_0;
-       u32 backend_map;
        u32 gb_tiling_config = 0;
        u32 cc_rb_backend_disable = 0;
        u32 cc_gc_shader_pipe_config = 0;
        u32 mc_arb_ramcfg;
-       u32 db_debug4;
+       u32 db_debug4, tmp;
+       u32 inactive_pipes, shader_pipe_config;
+       u32 disabled_rb_mask;
+       unsigned active_number;
 
        /* setup chip specs */
+       rdev->config.rv770.tiling_group_size = 256;
        switch (rdev->family) {
        case CHIP_RV770:
                rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        /* setup tiling, simd, pipe config */
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
+       shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+       inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+       for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+               if (!(inactive_pipes & tmp)) {
+                       active_number++;
+               }
+               tmp <<= 1;
+       }
+       if (active_number == 1) {
+               WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+       } else {
+               WREG32(SPI_CONFIG_CNTL, 0);
+       }
+
+       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+       tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+       if (tmp < rdev->config.rv770.max_backends) {
+               rdev->config.rv770.max_backends = tmp;
+       }
+
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+       tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+       if (tmp < rdev->config.rv770.max_pipes) {
+               rdev->config.rv770.max_pipes = tmp;
+       }
+       tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+       if (tmp < rdev->config.rv770.max_simds) {
+               rdev->config.rv770.max_simds = tmp;
+       }
+
        switch (rdev->config.rv770.max_tile_pipes) {
        case 1:
        default:
-               gb_tiling_config |= PIPE_TILING(0);
+               gb_tiling_config = PIPE_TILING(0);
                break;
        case 2:
-               gb_tiling_config |= PIPE_TILING(1);
+               gb_tiling_config = PIPE_TILING(1);
                break;
        case 4:
-               gb_tiling_config |= PIPE_TILING(2);
+               gb_tiling_config = PIPE_TILING(2);
                break;
        case 8:
-               gb_tiling_config |= PIPE_TILING(3);
+               gb_tiling_config = PIPE_TILING(3);
                break;
        }
        rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
+       disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+       tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+                                       R7XX_MAX_BACKENDS, disabled_rb_mask);
+       gb_tiling_config |= tmp << 16;
+       rdev->config.rv770.backend_map = tmp;
+
        if (rdev->family == CHIP_RV770)
                gb_tiling_config |= BANK_TILING(1);
-       else
-               gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       gb_tiling_config |= BANK_TILING(1);
+               else
+                       gb_tiling_config |= BANK_TILING(0);
+       }
        rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
        gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-               rdev->config.rv770.tiling_group_size = 512;
-       else
-               rdev->config.rv770.tiling_group_size = 256;
        if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
                gb_tiling_config |= ROW_TILING(3);
                gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        }
 
        gb_tiling_config |= BANK_SWAPS(1);
-
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       cc_rb_backend_disable |=
-               BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
-       if (rdev->family == CHIP_RV740)
-               backend_map = 0x28;
-       else
-               backend_map = r700_get_tile_pipe_to_backend_map(rdev,
-                                                               rdev->config.rv770.max_tile_pipes,
-                                                               (R7XX_MAX_BACKENDS -
-                                                                r600_count_pipe_bits((cc_rb_backend_disable &
-                                                                                      R7XX_MAX_BACKENDS_MASK) >> 16)),
-                                                               (cc_rb_backend_disable >> 16));
-
        rdev->config.rv770.tile_config = gb_tiling_config;
-       rdev->config.rv770.backend_map = backend_map;
-       gb_tiling_config |= BACKEND_MAP(backend_map);
 
        WREG32(GB_TILING_CONFIG, gb_tiling_config);
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-       WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
-
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_TCC_DISABLE, 0);
 
-       num_qd_pipes =
-               R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+       num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
@@ -776,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
                                       ACK_FLUSH_CTL(3) |
                                       SYNC_FLUSH_CTL));
 
+       if (rdev->family != CHIP_RV770)
+               WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
        db_debug3 = RREG32(DB_DEBUG3);
        db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
        switch (rdev->family) {
@@ -809,8 +652,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        WREG32(VGT_NUM_INSTANCES, 1);
 
-       WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
        WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                                          NUM_CLIP_SEQ(3)));
-
+       WREG32(VC_ENHANCE, 0);
 }
 
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1118,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -1140,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return r;
 
 }
@@ -1254,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return 0;
 }
 
index 9c549f702f2f35e995db4b88ffb31ad5b4d21e9b..b0adfc595d7541cdeb782c903101cf92c6d1ce37 100644 (file)
 #define                BACKEND_MAP(x)                                  ((x) << 16)
 
 #define GB_TILING_CONFIG                               0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
+#define                INACTIVE_QD_PIPES_SHIFT                     8
 #define                INACTIVE_SIMDS(x)                               ((x) << 16)
 #define                INACTIVE_SIMDS_MASK                             0x00FF0000
 
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 #define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x203C
 #define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x2038
 #define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x2034
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                CACHE_DEPTH(x)                                  ((x) << 1)
 #define        TCP_CNTL                                        0x9610
 #define        TCP_CHAN_STEER                                  0x9614
 
+#define        VC_ENHANCE                                      0x9714
+
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define                        VC_ONLY                                         0
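
The PIPE_TILING__SHIFT / PIPE_TILING__MASK pair added to this header (and to r600d.h earlier in the diff) lets rv770_gpu_init() read the pipe field back out of gb_tiling_config before handing it to the new r6xx_remap_render_backend() helper. Below is a tiny standalone round-trip of that pack-and-extract; PIPE_TILING(x) is reproduced on the assumption that it shifts its argument into bits 3:1, which is what the new mask and shift imply, and none of this is driver code.

/* Pack a pipe-count field and extract it again with the mask/shift above.
 * PIPE_TILING(x) is assumed here to be ((x) << 1); illustrative only. */
#include <inttypes.h>
#include <stdio.h>

#define PIPE_TILING(x)      ((x) << 1)
#define PIPE_TILING__SHIFT  1
#define PIPE_TILING__MASK   0x0000000e

int main(void)
{
	uint32_t gb_tiling_config = PIPE_TILING(2);   /* 4 tile pipes encode as field value 2 */
	uint32_t pipe_field = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;

	printf("gb_tiling_config = 0x%08" PRIx32 ", pipe field = %" PRIu32 "\n",
	       gb_tiling_config, pipe_field);
	return 0;
}
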
index 549732e56ca959b0f07d4ce32f0ff0af0e480ab1..0b0279291a73e79109dd95db5ceee4823be1da66 100644 (file)
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                          u32 num_tile_pipes,
-                                          u32 num_backends_per_asic,
-                                          u32 *backend_disable_mask_per_asic,
-                                          u32 num_shader_engines)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 num_backends_per_se;
-       u32 cur_pipe;
-       u32 swizzle_pipe[SI_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       /* force legal values */
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_tile_pipes > rdev->config.si.max_tile_pipes)
-               num_tile_pipes = rdev->config.si.max_tile_pipes;
-       if (num_shader_engines < 1)
-               num_shader_engines = 1;
-       if (num_shader_engines > rdev->config.si.max_shader_engines)
-               num_shader_engines = rdev->config.si.max_shader_engines;
-       if (num_backends_per_asic < num_shader_engines)
-               num_backends_per_asic = num_shader_engines;
-       if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
-               num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
-       /* make sure we have the same number of backends per se */
-       num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-       /* set up the number of backends per se */
-       num_backends_per_se = num_backends_per_asic / num_shader_engines;
-       if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
-               num_backends_per_se = rdev->config.si.max_backends_per_se;
-               num_backends_per_asic = num_backends_per_se * num_shader_engines;
-       }
-
-       /* create enable mask and count for enabled backends */
-       for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-               if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends_per_asic)
-                       break;
-       }
-
-       /* force the backends mask to match the current number of backends */
-       if (enabled_backends_count != num_backends_per_asic) {
-               u32 this_backend_enabled;
-               u32 shader_engine;
-               u32 backend_per_se;
-
-               enabled_backends_mask = 0;
-               enabled_backends_count = 0;
-               *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       /* calc the current se */
-                       shader_engine = i / rdev->config.si.max_backends_per_se;
-                       /* calc the backend per se */
-                       backend_per_se = i % rdev->config.si.max_backends_per_se;
-                       /* default to not enabled */
-                       this_backend_enabled = 0;
-                       if ((shader_engine < num_shader_engines) &&
-                           (backend_per_se < num_backends_per_se))
-                               this_backend_enabled = 1;
-                       if (this_backend_enabled) {
-                               enabled_backends_mask |= (1 << i);
-                               *backend_disable_mask_per_asic &= ~(1 << i);
-                               ++enabled_backends_count;
-                       }
-               }
-       }
-
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_TAHITI:
-       case CHIP_PITCAIRN:
-       case CHIP_VERDE:
-               force_no_swizzle = true;
-               break;
-       default:
-               force_no_swizzle = false;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
-                                       u32 disable_mask_per_se,
-                                       u32 max_disable_mask_per_se,
-                                       u32 num_shader_engines)
-{
-       u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-       u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-       if (num_shader_engines == 1)
-               return disable_mask_per_asic;
-       else if (num_shader_engines == 2)
-               return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-       else
-               return 0xffffffff;
-}
-
 static void si_tiling_mode_table_init(struct radeon_device *rdev)
 {
        const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
 }
 
+static void si_select_se_sh(struct radeon_device *rdev,
+                           u32 se_num, u32 sh_num)
+{
+       u32 data = INSTANCE_BROADCAST_WRITES;
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+               data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+       else if (se_num == 0xffffffff)
+               data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+       else if (sh_num == 0xffffffff)
+               data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+       else
+               data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+       WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+       u32 i, mask = 0;
+
+       for (i = 0; i < bit_width; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+       return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       if (data & 1)
+               data &= INACTIVE_CUS_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+       data >>= INACTIVE_CUS_SHIFT;
+
+       mask = si_create_bitmask(cu_per_sh);
+
+       return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+                        u32 se_num, u32 sh_per_se,
+                        u32 cu_per_sh)
+{
+       int i, j, k;
+       u32 data, mask, active_cu;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+                       active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+                       mask = 1;
+                       for (k = 0; k < 16; k++) {
+                               mask <<= k;
+                               if (active_cu & mask) {
+                                       data &= ~mask;
+                                       WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+                                       break;
+                               }
+                       }
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+                             u32 max_rb_num, u32 se_num,
+                             u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_RB_BACKEND_DISABLE);
+       if (data & 1)
+               data &= BACKEND_DISABLE_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+       data >>= BACKEND_DISABLE_SHIFT;
+
+       mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+       return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+                       u32 se_num, u32 sh_per_se,
+                       u32 max_rb_num)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       for (i = 0; i < se_num; i++) {
+               si_select_se_sh(rdev, i, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               WREG32(PA_SC_RASTER_CONFIG, data);
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
 static void si_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_array_config;
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
-       u32 cgts_tcc_disable;
        u32 sx_debug_1;
-       u32 gc_user_shader_array_config;
-       u32 gc_user_rb_backend_disable;
-       u32 cgts_user_tcc_disable;
        u32 hdp_host_path_cntl;
        u32 tmp;
        int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_TAHITI:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 12;
-               rdev->config.si.max_simds_per_se = 8;
+               rdev->config.si.max_cu_per_sh = 8;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 12;
                rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_PITCAIRN:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 8;
-               rdev->config.si.max_simds_per_se = 5;
+               rdev->config.si.max_cu_per_sh = 5;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 8;
                rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_VERDE:
        default:
                rdev->config.si.max_shader_engines = 1;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 4;
-               rdev->config.si.max_simds_per_se = 2;
+               rdev->config.si.max_cu_per_sh = 2;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 4;
                rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x40;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-       cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
-       cgts_tcc_disable = 0xffff0000;
-       for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
-               cgts_tcc_disable &= ~(1 << (16 + i));
-       gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-       gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
-       cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-       rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
        rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
-       tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
-       tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.backend_disable_mask_per_asic =
-               si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
-                                            rdev->config.si.num_shader_engines);
-       rdev->config.si.backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-       tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-       rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
        rdev->config.si.mem_max_burst_length_bytes = 256;
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        rdev->config.si.num_gpus = 1;
        rdev->config.si.multi_gpu_tile_size = 64;
 
-       gb_addr_config = 0;
-       switch (rdev->config.si.num_tile_pipes) {
-       case 1:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-       default:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
-       tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-       switch (rdev->config.si.num_gpus) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_GPUS(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_GPUS(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_GPUS(2);
-               break;
-       }
-       switch (rdev->config.si.multi_gpu_tile_size) {
-       case 16:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-               break;
-       case 32:
-       default:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-               break;
-       case 64:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-               break;
-       case 128:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-               break;
-       }
+       /* fix up row size */
+       gb_addr_config &= ~ROW_SIZE_MASK;
        switch (rdev->config.si.mem_row_size_in_kb) {
        case 1:
        default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
                break;
        }
 
-       tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
-       rdev->config.si.num_tile_pipes = (1 << tmp);
-       tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
-       rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
-       tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
-       rdev->config.si.num_shader_engines = tmp + 1;
-       tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
-       rdev->config.si.num_gpus = tmp + 1;
-       tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
-       rdev->config.si.multi_gpu_tile_size = 1 << tmp;
-       tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
-       rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
-       gb_backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.tile_config |= (3 << 0);
                break;
        }
-       rdev->config.si.tile_config |=
-               ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+               rdev->config.si.tile_config |= 1 << 4;
+       else
+               rdev->config.si.tile_config |= 0 << 4;
        rdev->config.si.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.si.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
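The block above packs a driver-private tiling dword because, per the comment, GB_ADDR_CONFIG alone lacks bank information. Below is a standalone pack/unpack sketch of the same bit layout; the field values are assumptions for illustration, not SI hardware data.

    #include <stdio.h>

    int main(void)
    {
        /* bits 3:0 pipe code, bit 4 banks, shift 8 pipe interleave, shift 12 row size */
        unsigned int pipes = 3, banks = 1, interleave = 1, row = 2;
        unsigned int tile_config = (pipes << 0) | (banks << 4) |
                                   (interleave << 8) | (row << 12);

        printf("tile_config = 0x%04x\n", tile_config);
        printf("pipe code = %u, banks bit = %u\n",
               tile_config & 0xf, (tile_config >> 4) & 0x1);
        return 0;
    }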
 
-       rdev->config.si.backend_map = gb_backend_map;
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       /* primary versions */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
-       WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+       si_tiling_mode_table_init(rdev);
 
-       /* user versions */
-       WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+       si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+                   rdev->config.si.max_sh_per_se,
+                   rdev->config.si.max_backends_per_se);
 
-       WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+       si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+                    rdev->config.si.max_sh_per_se,
+                    rdev->config.si.max_cu_per_sh);
 
-       si_tiling_mode_table_init(rdev);
 
        /* set HW defaults for 3D engine */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -2518,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
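The hunk above stops hard-coding a 1 GiB context-1 aperture and instead sizes the page-table end address from rdev->vm_manager.max_pfn. A quick standalone arithmetic check of how an end-address PFN relates to the mapped size; the 4 KiB page size and 4 GiB VM space below are assumptions for illustration, not values from this patch.

    #include <stdio.h>

    int main(void)
    {
        unsigned long long page_size = 4096;        /* assumed GPU page size */
        unsigned long long vm_bytes  = 4ULL << 30;  /* hypothetical 4 GiB VM space */

        printf("old fixed end pfn: %llu\n", (1ULL << 30) / page_size);
        printf("max_pfn for 4 GiB: %llu\n", vm_bytes / page_size);
        return 0;
    }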
index eda938a7cb6e1ffea2c2f6f7cedac0430541ec3c..501f9d431d5785de91f3aba3cb3a8e1c3aa74ea3 100644 (file)
 #define SI_DC_GPIO_HPD_EN                        0x65b8
 #define SI_DC_GPIO_HPD_Y                         0x65bc
 
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)           (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2                  0
+#       define SI_ADDR_SURF_P4_8x16             4
+#       define SI_ADDR_SURF_P4_16x16            5
+#       define SI_ADDR_SURF_P4_16x32            6
+#       define SI_ADDR_SURF_P4_32x32            7
+#       define SI_ADDR_SURF_P8_16x16_8x16       8
+#       define SI_ADDR_SURF_P8_16x32_8x16       9
+#       define SI_ADDR_SURF_P8_32x32_8x16       10
+#       define SI_ADDR_SURF_P8_16x32_16x16      11
+#       define SI_ADDR_SURF_P8_32x32_16x16      12
+#       define SI_ADDR_SURF_P8_32x32_16x32      13
+#       define SI_ADDR_SURF_P8_32x64_32x32      14
+
 #endif
index 53ea2c42dbd6f96954180fb617b13a1c4ff20313..db4067962868a07c0814c037b7c4eab2a8b52114 100644 (file)
 #ifndef SI_H
 #define SI_H
 
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+
 #define        CG_MULT_THERMAL_STATUS                                  0x714
 #define                ASIC_MAX_TEMP(x)                                ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                              0x000001ff
 #define                SOFT_RESET_IA                                   (1 << 15)
 
 #define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SH_INDEX(x)                             ((x) << 8)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                SH_BROADCAST_WRITES                     (1 << 29)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
 
 #define GRBM_INT_CNTL                                   0x8060
 #       define RDERR_INT_ENABLE                         (1 << 0)
 #define        VGT_TF_MEMORY_BASE                              0x89B8
 
 #define CC_GC_SHADER_ARRAY_CONFIG                      0x89bc
+#define                INACTIVE_CUS_MASK                       0xFFFF0000
+#define                INACTIVE_CUS_SHIFT                      16
 #define GC_USER_SHADER_ARRAY_CONFIG                    0x89c0
 
 #define        PA_CL_ENHANCE                                   0x8A14
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
 #define VGT_EVENT_INITIATOR                             0x28a90
 #       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
 #       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
index 30d98d14b5c586bd6d53a845488c2368a7ab9bc4..dd14cd1a0033e5a91d948c08a92e81772926674e 100644 (file)
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
        dev_priv->chipset = chipset;
-       idr_init(&dev->object_name_idr);
 
        return 0;
 }
index 36792bd4da77598e69dad490b3cf4b4ac39828f1..36f4b28c1b90a499043a42b9b2bce9894401d36e 100644 (file)
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        (*destroy)(bo);
                else
                        kfree(bo);
+               ttm_mem_global_free(mem_glob, acc_size);
                return -EINVAL;
        }
        bo->destroy = destroy;
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
                        struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo;
-       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;
 
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
-       if (unlikely(bo == NULL)) {
-               ttm_mem_global_free(mem_glob, acc_size);
+       if (unlikely(bo == NULL))
                return -ENOMEM;
-       }
 
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                                buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL, NULL);
@@ -1834,6 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        spin_unlock(&glob->lru_lock);
                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
+                       spin_lock(&glob->lru_lock);
                        continue;
                }
 
index 4d02c46a9420b9043b13981d2f10f95b913f1ed5..6e52069894b35d91037474521e5ebf6e2f157e98 100644 (file)
 
 static struct drm_driver driver;
 
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
 static struct usb_device_id id_table[] = {
-       {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+       {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+        .bInterfaceSubClass = 0x00,
+        .bInterfaceProtocol = 0x00,
+        .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+                       USB_DEVICE_ID_MATCH_INT_CLASS |
+                       USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+                       USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
        {},
 };
 MODULE_DEVICE_TABLE(usb, id_table);
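The comment at the top of this hunk explains matching on DisplayLink's vendor ID plus the vendor-defined interface class/subclass/protocol instead of listing every product ID. The standalone sketch below shows the comparison those match_flags amount to; the struct and sample descriptor values are invented for illustration, not the kernel's usb_device_id machinery.

    #include <stdio.h>

    struct iface_desc { unsigned short idVendor; unsigned char cls, sub, proto; };

    static int displaylink_match(const struct iface_desc *d)
    {
        return d->idVendor == 0x17e9 &&  /* DisplayLink VID */
               d->cls == 0xff &&         /* vendor-defined interface class */
               d->sub == 0x00 &&
               d->proto == 0x00;
    }

    int main(void)
    {
        struct iface_desc usb2_chip   = { 0x17e9, 0xff, 0x00, 0x00 };  /* matches */
        struct iface_desc future_chip = { 0x17e9, 0xff, 0x01, 0x00 };  /* skipped */
        printf("%d %d\n", displaylink_match(&usb2_chip), displaylink_match(&future_chip));
        return 0;
    }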
index a029ee39b0c526d0e0fe63150acd25ea992f73c0..ce9a61179925cd540f4d4956b991513ca53b27e2 100644 (file)
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
        if (!fb->active_16)
                return 0;
 
-       if (!fb->obj->vmapping)
-               udl_gem_vmap(fb->obj);
+       if (!fb->obj->vmapping) {
+               ret = udl_gem_vmap(fb->obj);
+               if (ret == -ENOMEM) {
+                       DRM_ERROR("failed to vmap fb\n");
+                       return 0;
+               }
+               if (!fb->obj->vmapping) {
+                       DRM_ERROR("failed to vmapping\n");
+                       return 0;
+               }
+       }
 
        start_cycles = get_cycles();
 
index 40efd32f7dce85f0d45e8fce92cc64ea1e11990e..7bd65bdd15a8092e955d959c08fd6dd04781d490 100644 (file)
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
        int page_count = obj->base.size / PAGE_SIZE;
        int ret;
 
+       if (obj->base.import_attach) {
+               ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+                                              0, obj->base.size, DMA_BIDIRECTIONAL);
+               if (ret)
+                       return -EINVAL;
+
+               obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+               if (!obj->vmapping)
+                       return -ENOMEM;
+               return 0;
+       }
+
        ret = udl_gem_get_pages(obj, GFP_KERNEL);
        if (ret)
                return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+       if (obj->base.import_attach) {
+               dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+               dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+                                      obj->base.size, DMA_BIDIRECTIONAL);
+               return;
+       }
+
        if (obj->vmapping)
                vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-       if (gem_obj->import_attach)
-               drm_prime_gem_destroy(gem_obj, obj->sg);
-
        if (obj->vmapping)
                udl_gem_vunmap(obj);
 
+       if (gem_obj->import_attach)
+               drm_prime_gem_destroy(gem_obj, obj->sg);
+
        if (obj->pages)
                udl_gem_put_pages(obj);
 
@@ -234,7 +253,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 
        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
-               return ret;
+               goto out;
        if (!gobj->base.map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
@@ -257,8 +276,6 @@ static int udl_prime_create(struct drm_device *dev,
 {
        struct udl_gem_object *obj;
        int npages;
-       int i;
-       struct scatterlist *iter;
 
        npages = size / PAGE_SIZE;
 
index a8d5f09428c72a1887c26d7374b6f971bc871a1e..4c2d836a0893f36f91247a5ce38d6cfed883faa8 100644 (file)
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
                        u8 length;
                        u16 key;
 
-                       key = *((u16 *) desc);
+                       key = le16_to_cpu(*((u16 *) desc));
                        desc += sizeof(u16);
                        length = *desc;
                        desc++;
index 1f182254e81e84beb0c4ed613c02e081ebf38258..c126182ac07eee1411e453dffdff43703302c13c 100644 (file)
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
 
        dev_priv->chipset = chipset;
 
-       idr_init(&dev->object_name_idr);
-
        pci_set_master(dev->pdev);
 
        ret = drm_vblank_init(dev, 1);
index 51c9ba5cd2fbff85f4411ec6629f112ad8373a70..21ee7822656041c2e99c7f8f549038034f56c6ce 100644 (file)
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
        cmd += sizeof(remap_cmd) / sizeof(uint32);
 
        for (i = 0; i < num_pages; ++i) {
-               if (VMW_PPN_SIZE > 4)
+               if (VMW_PPN_SIZE <= 4)
                        *cmd = page_to_pfn(*pages++);
                else
                        *((uint64_t *)cmd) = page_to_pfn(*pages++);
index 38f9534ac513bcf51b90fcfff56d6bd6ed84c988..5b3c7d135dc91c407fc9b2bf19b2e3e912ccfed5 100644 (file)
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
        return NULL;
 }
 
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+       struct vga_switcheroo_client *client;
+
+       client = find_client_from_pci(&vgasr_priv.clients, pdev);
+       if (!client)
+               return VGA_SWITCHEROO_NOT_FOUND;
+       if (!vgasr_priv.active)
+               return VGA_SWITCHEROO_INIT;
+       return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
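vga_switcheroo_get_client_state() is exported here so a GPU driver can ask the switcheroo layer whether its device is currently powered before touching hardware. A kernel-context sketch of a caller follows; hypothetical_gpu_probe() and its error handling are invented for illustration, only the function and the VGA_SWITCHEROO_* states come from the code above.

    /* Hypothetical caller; not part of this patch. */
    static int hypothetical_gpu_probe(struct pci_dev *pdev)
    {
            if (vga_switcheroo_get_client_state(pdev) == VGA_SWITCHEROO_OFF)
                    return -ENODEV; /* powered down by the switcheroo, defer init */
            /* VGA_SWITCHEROO_NOT_FOUND, _INIT and _ON all mean probing is safe */
            return 0;
    }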
 void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 {
        struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
                vga_switchon(new_client);
 
        vga_set_default_device(new_client->pdev);
-       set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
-
        return 0;
 }
 
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
 
        active->active = false;
 
+       set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
        if (new_client->fb_info) {
                struct fb_event event;
                event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
        if (new_client->ops->reprobe)
                new_client->ops->reprobe(new_client->pdev);
 
-       set_audio_state(active->id, VGA_SWITCHEROO_OFF);
-
        if (active->pwr_state == VGA_SWITCHEROO_ON)
                vga_switchoff(active);
 
+       set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
        new_client->active = true;
        return 0;
 }
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
        /* pwr off the device not in use */
        if (strncmp(usercmd, "OFF", 3) == 0) {
                list_for_each_entry(client, &vgasr_priv.clients, list) {
-                       if (client->active)
+                       if (client->active || client_is_audio(client))
                                continue;
+                       set_audio_state(client->id, VGA_SWITCHEROO_OFF);
                        if (client->pwr_state == VGA_SWITCHEROO_ON)
                                vga_switchoff(client);
                }
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
        /* pwr on the device not in use */
        if (strncmp(usercmd, "ON", 2) == 0) {
                list_for_each_entry(client, &vgasr_priv.clients, list) {
-                       if (client->active)
+                       if (client->active || client_is_audio(client))
                                continue;
                        if (client->pwr_state == VGA_SWITCHEROO_OFF)
                                vga_switchon(client);
+                       set_audio_state(client->id, VGA_SWITCHEROO_ON);
                }
                goto out;
        }
index 034c80a10f1fd547ed2757503d63f74965105118..3fda8c87f02cd21e4c5eaf16d67010db203aaaf4 100644 (file)
@@ -1,20 +1,11 @@
 #
 # HID driver configuration
 #
-menuconfig HID_SUPPORT
-       bool "HID Devices"
-       depends on INPUT
-       default y
-       ---help---
-         Say Y here to get to see options for various computer-human interface
-         device drivers. This option alone does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+     depends on INPUT
 
 config HID
-       tristate "Generic HID support"
+       tristate "HID bus support"
        depends on INPUT
        default y
        ---help---
@@ -23,14 +14,17 @@ config HID
          most commonly used to refer to the USB-HID specification, but other
          devices (such as, but not strictly limited to, Bluetooth) are
          designed using HID specification (this involves certain keyboards,
-         mice, tablets, etc). This option compiles into kernel the generic
-         HID layer code (parser, usages, etc.), which can then be used by
-         transport-specific HID implementation (like USB or Bluetooth).
+         mice, tablets, etc). This option adds the HID bus to the kernel,
+         together with generic HID layer code. The HID devices are added and
+         removed from the HID bus by the transport-layer drivers, such as
+         usbhid (USB_HID) and hidp (BT_HIDP).
 
          For docs and specs, see http://www.usb.org/developers/hidpage/
 
          If unsure, say Y.
 
+if HID
+
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
        depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@ config HIDRAW
 
        If unsure, say Y.
 
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
-       depends on HID
-
 config HID_GENERIC
        tristate "Generic HID driver"
        depends on HID
-       default y
+       default HID
        ---help---
-       Support for generic HID devices.
+       Support for generic devices on the HID bus. This includes most
+       keyboards and mice, joysticks, tablets and digitizers.
 
        To compile this driver as a module, choose M here: the module
        will be called hid-generic.
 
        If unsure, say Y.
 
+menu "Special HID drivers"
+       depends on HID
+
 config HID_A4TECH
        tristate "A4 tech mice" if EXPERT
        depends on USB_HID
@@ -393,6 +386,7 @@ config HID_MULTITOUCH
          - Unitec Panels
          - XAT optical touch panels
          - Xiroku optical touch panels
+         - Zytronic touch panels
 
          If unsure, say N.
 
@@ -662,4 +656,8 @@ config HID_ZYDACRON
 
 endmenu
 
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
index fa10f847f7dbf163ba5856c296690039ba07b242..585344b6d33815632b8d477ebb1f0eb9e7f80818 100644 (file)
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 8e3a6b261477d9866ec45c83df4b2766098d281d..4c87276c8ddba2f2a6f072cb536e84f8b7a6d256 100644 (file)
@@ -1503,6 +1503,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1880,6 +1883,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
        { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1994,6 +1998,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2088,6 +2093,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index 9373f535dfe9908c14b8ff04afbe90a981693a54..32039235cfeea6e9025c2ce6d0eed04f3294b487 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_VENDOR_ID_AVERMEDIA                0x07ca
 #define USB_DEVICE_ID_AVER_FM_MR800    0xb800
 
+#define USB_VENDOR_ID_AXENTIA          0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
 #define USB_VENDOR_ID_BAANTO           0x2453
 #define USB_DEVICE_ID_BAANTO_MT_190W2  0x0100
 
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
+#define USB_VENDOR_ID_MADCATZ          0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
+
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE       0x0600
 
+#define USB_VENDOR_ID_SENNHEISER       0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
+
 #define USB_VENDOR_ID_SIGMA_MICRO      0x1c4f
 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD     0x0002
 
 #define USB_VENDOR_ID_ZYDACRON 0x13EC
 #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL  0x0006
 
+#define USB_VENDOR_ID_ZYTRONIC         0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100  0x0005
+
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
index 132b0019365eed9057eeaaf0a3b222eece0853ad..5301006f6c15fb0246d42073c79931259db9312a 100644 (file)
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+               USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        {}
 };
 
index 5e8a7ed42344d67d1c874fe3e3242766aed1b537..0f9c146fc00d7391932bcaef9df70e4fbc55541e 100644 (file)
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
 
 static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
 
 static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
                                          unsigned timeout)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
-       dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
-       dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+       dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+       dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
 
 
index 7cf3ffe4b7bc26f4eeda3f2167c1f88f6e46e29b..40ac6654f1d19f5d1bf5b02dacfd4d7d5f98c14e 100644 (file)
@@ -426,8 +426,10 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                __set_bit(EV_ABS, input->evbit);
 
                input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+                                    4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+                                    4, 0);
                input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
 
                /* Note: Touch Y position from the device is inverted relative
index 6e3332a99976d669e008131e0851ae8788113749..76479246d4ee35ef537cf66e83fe5c9f1a6683af 100644 (file)
@@ -1048,6 +1048,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
                        USB_DEVICE_ID_XIROKU_CSR2) },
 
+       /* Zytronic panels */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
+                       USB_DEVICE_ID_ZYTRONIC_ZXY100) },
+
        /* Generic MT device */
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
        { }
index 0f20fd17cf066ad6b32a868dd8069c0be83077cf..0108c5991a0417679d04a9d048d757d9e3f178f4 100644 (file)
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
        depends on USB
 
 config USB_HID
-       tristate "USB Human Interface Device (full HID) support"
+       tristate "USB HID transport layer"
        default y
        depends on USB && INPUT
        select HID
        ---help---
-         Say Y here if you want full HID support to connect USB keyboards,
+         Say Y here if you want to connect USB keyboards,
          mice, joysticks, graphic tablets, or any other HID based devices
          to your computer via USB, as well as Uninterruptible Power Supply
          (UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
 
 endmenu
 
-
+endmenu
index 0597ee604f6e1d096bf1404a6161949df73de154..903eef3d3e10034f8e36974ba4e95ee746438702 100644 (file)
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
index 7cd9bf42108b7368803322d7116c3a8951d30a1b..6f1d167cb1ea9c2174c403a8a0f0b6b0f1d33bc7 100644 (file)
@@ -1036,8 +1036,9 @@ config SENSORS_SCH56XX_COMMON
 
 config SENSORS_SCH5627
        tristate "SMSC SCH5627"
-       depends on !PPC
+       depends on !PPC && WATCHDOG
        select SENSORS_SCH56XX_COMMON
+       select WATCHDOG_CORE
        help
          If you say yes here you get support for the hardware monitoring
          features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@ config SENSORS_SCH5627
 
 config SENSORS_SCH5636
        tristate "SMSC SCH5636"
-       depends on !PPC
+       depends on !PPC && WATCHDOG
        select SENSORS_SCH56XX_COMMON
+       select WATCHDOG_CORE
        help
          SMSC SCH5636 Super I/O chips include an embedded microcontroller for
          hardware monitoring solutions, allowing motherboard manufacturers to
index f082e48ab11395e3f99545d3feea2b3def29d32a..2cde9ecf7731b6a66bd745997171300b799a35a7 100644 (file)
@@ -8,7 +8,7 @@
  *
  * Based on hdaps.c driver:
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * Fan control based on smcFanControl:
  * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
-               pr_warn("%s: read arg fail\n", key);
+               pr_warn("%.4s: read arg fail\n", key);
                return -EIO;
        }
 
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 
        for (i = 0; i < len; i++) {
                if (__wait_status(0x05)) {
-                       pr_warn("%s: read data fail\n", key);
+                       pr_warn("%.4s: read data fail\n", key);
                        return -EIO;
                }
                buffer[i] = inb(APPLESMC_DATA_PORT);
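The switch from "%s" to "%.4s" above matters because SMC keys are four-byte codes with no terminating NUL; "%.4s" caps the read at four bytes. A standalone illustration, with the sample key assumed:

    #include <stdio.h>

    int main(void)
    {
        char key[4] = { 'T', 'C', '0', 'P' };   /* 4 bytes, deliberately no NUL */

        /* "%s" would read past the array; "%.4s" stops after four characters. */
        printf("%.4s: read arg fail\n", key);
        return 0;
    }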
index b9d512331ed49561331b638b78f29e5120288b39..637c51c11b44ca774057404b6e5326a4ba1de31d 100644 (file)
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
        return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
+struct tjmax {
+       char const *id;
+       int tjmax;
+};
+
+static const struct tjmax __cpuinitconst tjmax_table[] = {
+       { "CPU D410", 100000 },
+       { "CPU D425", 100000 },
+       { "CPU D510", 100000 },
+       { "CPU D525", 100000 },
+       { "CPU N450", 100000 },
+       { "CPU N455", 100000 },
+       { "CPU N470", 100000 },
+       { "CPU N475", 100000 },
+       { "CPU  230", 100000 },
+       { "CPU  330", 125000 },
+};
+
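The table above pins TjMax for parts whose MSRs do not report it reliably; the lookup added further down simply substring-matches the CPU model string. A standalone version of that lookup; the model string and the two table entries shown are examples.

    #include <stdio.h>
    #include <string.h>

    struct tjmax { const char *id; int tjmax; };

    static const struct tjmax table[] = {
        { "CPU N450", 100000 },      /* values in millidegrees Celsius */
        { "CPU  330", 125000 },
    };

    int main(void)
    {
        const char *model = "Intel(R) Atom(TM) CPU N450   @ 1.66GHz"; /* assumed */
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (strstr(model, table[i].id))
                printf("tjmax = %d\n", table[i].tjmax);
        return 0;
    }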
 static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                                  struct device *dev)
 {
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
        int err;
        u32 eax, edx;
        struct pci_dev *host_bridge;
+       int i;
+
+       /* explicit tjmax table entries override heuristics */
+       for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+               if (strstr(c->x86_model_id, tjmax_table[i].id))
+                       return tjmax_table[i].tjmax;
+       }
 
        /* Early chips have no MSR for TjMax */
 
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
 
        /* Atom CPUs */
 
-       if (c->x86_model == 0x1c) {
+       if (c->x86_model == 0x1c || c->x86_model == 0x26
+           || c->x86_model == 0x27) {
                usemsr_ee = 0;
 
                host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                        tjmax = 90000;
 
                pci_dev_put(host_bridge);
+       } else if (c->x86_model == 0x36) {
+               usemsr_ee = 0;
+               tjmax = 100000;
        }
 
        if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
-       if (!cpu_has(c, X86_FEATURE_DTS))
+       if (!cpu_has(c, X86_FEATURE_DTHERM))
                return;
 
        if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 };
 
 static const struct x86_cpu_id coretemp_ids[] = {
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
 
 static int __init coretemp_init(void)
 {
-       int i, err = -ENODEV;
+       int i, err;
 
        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
index 9691f664c76eb236eba72407990cc5b5aaadbf6c..e7d234b59312f7ed479c9fcc0ed309500e230c93 100644 (file)
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                data->fan_rpm_control = true;
                break;
        default:
-               mutex_unlock(&data->update_lock);
-               return -EINVAL;
+               count = -EINVAL;
+               goto err;
        }
 
-       read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       if (result) {
+               count = result;
+               goto err;
+       }
 
        if (data->fan_rpm_control)
                conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                conf_reg &= ~0x80;
 
        i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
        mutex_unlock(&data->update_lock);
        return count;
 }
index e7701d99f8e8598207a18de6b6ad7f48a0796073..f1de3979181fd22517c1f068a57d4526cb0ce6e8 100644 (file)
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 
        /* Start monitoring */
        it87_write_value(data, IT87_REG_CONFIG,
-                        (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+                        (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
                         | (update_vbat ? 0x41 : 0x01));
 }
 
index a9bfd6736d9a8b46b2ac143757e695ec0b695b74..e72ba5d2a8248e04ebf4bf65662007e57b960ca3 100644 (file)
@@ -590,6 +590,6 @@ abort:
 
 module_i2c_driver(jc42_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("JC42 driver");
 MODULE_LICENSE("GPL");
index d264937c7f5e09f8fa9c2cf453d6c4e4b37bb3bf..bd75d2415432dfefada86bff1471a9cc016cc032 100644 (file)
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
 
 module_i2c_driver(pem_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
 MODULE_LICENSE("GPL");
index 069b7d34d8f9216252678a056efcf5203c02e114..77476a575c4e6340b5987ff7a50e600c42c55b13 100644 (file)
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
 
 module_i2c_driver(ltc4261_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("LTC4261 driver");
 MODULE_LICENSE("GPL");
index 822261be84dd5ed65639ebf31e04f8ecf0edb364..019427d7a5fd8be8f3bc1fbc1550f375778f34fa 100644 (file)
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
 
 module_i2c_driver(max16065_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("MAX16065 driver");
 MODULE_LICENSE("GPL");
index 8ec6dfbccb640f8e3f89d22c701cc6151ffcebfe..8342275378b85759f57b9af583f34d281e58cd25 100644 (file)
@@ -579,7 +579,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
        }
 
        /* Note failing to register the watchdog is not a fatal error */
-       data->watchdog = sch56xx_watchdog_register(data->addr,
+       data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
                        (build_code << 24) | (build_id << 8) | hwmon_rev,
                        &data->update_lock, 1);
 
index 906d4ed32d81abd2b6c7d59f1fcd77c035fad8bc..96a7e68718cadb8348ea4d7680750ade1d6d143b 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit sch5636_probe(struct platform_device *pdev)
        }
 
        /* Note failing to register the watchdog is not a fatal error */
-       data->watchdog = sch56xx_watchdog_register(data->addr,
+       data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
                                        (revision[0] << 8) | revision[1],
                                        &data->update_lock, 0);
 
index ce52fc57d41d70cf8bb119e9e3a2d3756e294615..4380f5d07be2b8b5398d43e2e393307f099341e6 100644 (file)
@@ -66,15 +66,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 
 struct sch56xx_watchdog_data {
        u16 addr;
-       u32 revision;
        struct mutex *io_lock;
-       struct mutex watchdog_lock;
-       struct list_head list; /* member of the watchdog_data_list */
        struct kref kref;
-       struct miscdevice watchdog_miscdev;
-       unsigned long watchdog_is_open;
-       char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
-       char watchdog_expect_close;
+       struct watchdog_info wdinfo;
+       struct watchdog_device wddev;
        u8 watchdog_preset;
        u8 watchdog_control;
        u8 watchdog_output_enable;
@@ -82,15 +77,6 @@ struct sch56xx_watchdog_data {
 
 static struct platform_device *sch56xx_pdev;
 
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
 /* Super I/O functions */
 static inline int superio_inb(int base, int reg)
 {
@@ -272,22 +258,22 @@ EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
  * Watchdog routines
  */
 
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/*
+ * Release our data struct when we're unregistered *and*
+ * all references to our watchdog device are released.
+ */
+static void watchdog_release_resources(struct kref *r)
 {
        struct sch56xx_watchdog_data *data =
                container_of(r, struct sch56xx_watchdog_data, kref);
        kfree(data);
 }
 
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
-                               int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+                               unsigned int timeout)
 {
-       int ret, resolution;
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+       unsigned int resolution;
        u8 control;
+       int ret;
 
        /* 1 second or 60 second resolution? */
        if (timeout <= 255)
@@ -298,12 +284,6 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
        if (timeout < resolution || timeout > (resolution * 255))
                return -EINVAL;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave;
-       }
-
        if (resolution == 1)
                control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
        else
@@ -316,7 +296,7 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
                                                control);
                mutex_unlock(data->io_lock);
                if (ret)
-                       goto leave;
+                       return ret;
 
                data->watchdog_control = control;
        }
@@ -326,38 +306,17 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
         * the watchdog countdown.
         */
        data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+       wddev->timeout = data->watchdog_preset * resolution;
 
-       ret = data->watchdog_preset * resolution;
-leave:
-       mutex_unlock(&data->watchdog_lock);
-       return ret;
-}
-
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
-{
-       int timeout;
-
-       mutex_lock(&data->watchdog_lock);
-       if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
-               timeout = data->watchdog_preset;
-       else
-               timeout = data->watchdog_preset * 60;
-       mutex_unlock(&data->watchdog_lock);
-
-       return timeout;
+       return 0;
 }
 
-static int watchdog_start(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret;
        u8 val;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave_unlock_watchdog;
-       }
-
        /*
         * The sch56xx's watchdog cannot really be started / stopped
         * it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
        if (ret)
                goto leave;
 
-       /* 2. Enable output (if not already enabled) */
-       if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
-               val = data->watchdog_output_enable |
-                     SCH56XX_WDOG_OUTPUT_ENABLE;
-               ret = sch56xx_write_virtual_reg(data->addr,
-                                               SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-                                               val);
-               if (ret)
-                       goto leave;
+       /* 2. Enable output */
+       val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+       ret = sch56xx_write_virtual_reg(data->addr,
+                                       SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+       if (ret)
+               goto leave;
 
-               data->watchdog_output_enable = val;
-       }
+       data->watchdog_output_enable = val;
 
        /* 3. Clear the watchdog event bit if set */
        val = inb(data->addr + 9);
@@ -405,234 +360,70 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
 
 leave:
        mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
-       mutex_unlock(&data->watchdog_lock);
        return ret;
 }
 
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave;
-       }
-
        /* Reset the watchdog countdown counter */
        mutex_lock(data->io_lock);
        ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
                                        data->watchdog_preset);
        mutex_unlock(data->io_lock);
-leave:
-       mutex_unlock(&data->watchdog_lock);
+
        return ret;
 }
 
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret = 0;
        u8 val;
 
-       if (!data->addr)
-               return -ENODEV;
-
-       if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
-               val = data->watchdog_output_enable &
-                     ~SCH56XX_WDOG_OUTPUT_ENABLE;
-               mutex_lock(data->io_lock);
-               ret = sch56xx_write_virtual_reg(data->addr,
-                                               SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-                                               val);
-               mutex_unlock(data->io_lock);
-               if (ret)
-                       return ret;
-
-               data->watchdog_output_enable = val;
-       }
-
-       return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
-       int ret;
-
-       mutex_lock(&data->watchdog_lock);
-       ret = watchdog_stop_unlocked(data);
-       mutex_unlock(&data->watchdog_lock);
-
-       return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       if (data->watchdog_expect_close) {
-               watchdog_stop(data);
-               data->watchdog_expect_close = 0;
-       } else {
-               watchdog_trigger(data);
-               pr_crit("unexpected close, not stopping watchdog!\n");
-       }
-
-       clear_bit(0, &data->watchdog_is_open);
-
-       mutex_lock(&watchdog_data_mutex);
-       kref_put(&data->kref, sch56xx_watchdog_release_resources);
-       mutex_unlock(&watchdog_data_mutex);
+       val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+       mutex_lock(data->io_lock);
+       ret = sch56xx_write_virtual_reg(data->addr,
+                                       SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+       mutex_unlock(data->io_lock);
+       if (ret)
+               return ret;
 
+       data->watchdog_output_enable = val;
        return 0;
 }
 
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
 {
-       struct sch56xx_watchdog_data *pos, *data = NULL;
-       int ret, watchdog_is_open;
-
-       /*
-        * We get called from drivers/char/misc.c with misc_mtx hold, and we
-        * call misc_register() from sch56xx_watchdog_probe() with
-        * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
-        * lock, this is a possible deadlock, so we use mutex_trylock here.
-        */
-       if (!mutex_trylock(&watchdog_data_mutex))
-               return -ERESTARTSYS;
-       list_for_each_entry(pos, &watchdog_data_list, list) {
-               if (pos->watchdog_miscdev.minor == iminor(inode)) {
-                       data = pos;
-                       break;
-               }
-       }
-       /* Note we can never not have found data, so we don't check for this */
-       watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
-       if (!watchdog_is_open)
-               kref_get(&data->kref);
-       mutex_unlock(&watchdog_data_mutex);
-
-       if (watchdog_is_open)
-               return -EBUSY;
-
-       filp->private_data = data;
-
-       /* Start the watchdog */
-       ret = watchdog_start(data);
-       if (ret) {
-               watchdog_release(inode, filp);
-               return ret;
-       }
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-       return nonseekable_open(inode, filp);
+       kref_get(&data->kref);
 }
 
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
-       size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
 {
-       int ret;
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       if (count) {
-               if (!nowayout) {
-                       size_t i;
-
-                       /* Clear it in case it was set with a previous write */
-                       data->watchdog_expect_close = 0;
-
-                       for (i = 0; i != count; i++) {
-                               char c;
-                               if (get_user(c, buf + i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       data->watchdog_expect_close = 1;
-                       }
-               }
-               ret = watchdog_trigger(data);
-               if (ret)
-                       return ret;
-       }
-       return count;
-}
-
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
-                          unsigned long arg)
-{
-       struct watchdog_info ident = {
-               .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
-               .identity = "sch56xx watchdog"
-       };
-       int i, ret = 0;
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ident.firmware_version = data->revision;
-               if (!nowayout)
-                       ident.options |= WDIOF_MAGICCLOSE;
-               if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
-                       ret = -EFAULT;
-               break;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               ret = put_user(0, (int __user *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               ret = watchdog_trigger(data);
-               break;
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-       case WDIOC_GETTIMEOUT:
-               i = watchdog_get_timeout(data);
-               ret = put_user(i, (int __user *)arg);
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(i, (int __user *)arg)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               ret = watchdog_set_timeout(data, i);
-               if (ret >= 0)
-                       ret = put_user(ret, (int __user *)arg);
-               break;
-
-       case WDIOC_SETOPTIONS:
-               if (get_user(i, (int __user *)arg)) {
-                       ret = -EFAULT;
-                       break;
-               }
-
-               if (i & WDIOS_DISABLECARD)
-                       ret = watchdog_stop(data);
-               else if (i & WDIOS_ENABLECARD)
-                       ret = watchdog_trigger(data);
-               else
-                       ret = -EINVAL;
-               break;
-
-       default:
-               ret = -ENOTTY;
-       }
-       return ret;
+       kref_put(&data->kref, watchdog_release_resources);
 }
 
-static const struct file_operations watchdog_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .open = watchdog_open,
-       .release = watchdog_release,
-       .write = watchdog_write,
-       .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+       .owner          = THIS_MODULE,
+       .start          = watchdog_start,
+       .stop           = watchdog_stop,
+       .ping           = watchdog_trigger,
+       .set_timeout    = watchdog_set_timeout,
+       .ref            = watchdog_ref,
+       .unref          = watchdog_unref,
 };
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
        u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
 {
        struct sch56xx_watchdog_data *data;
-       int i, err, control, output_enable;
-       const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+       int err, control, output_enable;
 
        /* Cache the watchdog registers */
        mutex_lock(io_lock);
@@ -656,82 +447,55 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(
                return NULL;
 
        data->addr = addr;
-       data->revision = revision;
        data->io_lock = io_lock;
-       data->watchdog_control = control;
-       data->watchdog_output_enable = output_enable;
-       mutex_init(&data->watchdog_lock);
-       INIT_LIST_HEAD(&data->list);
        kref_init(&data->kref);
 
-       err = watchdog_set_timeout(data, 60);
-       if (err < 0)
-               goto error;
-
-       /*
-        * We take the data_mutex lock early so that watchdog_open() cannot
-        * run when misc_register() has completed, but we've not yet added
-        * our data to the watchdog_data_list.
-        */
-       mutex_lock(&watchdog_data_mutex);
-       for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
-               /* Register our watchdog part */
-               snprintf(data->watchdog_name, sizeof(data->watchdog_name),
-                       "watchdog%c", (i == 0) ? '\0' : ('0' + i));
-               data->watchdog_miscdev.name = data->watchdog_name;
-               data->watchdog_miscdev.fops = &watchdog_fops;
-               data->watchdog_miscdev.minor = watchdog_minors[i];
-               err = misc_register(&data->watchdog_miscdev);
-               if (err == -EBUSY)
-                       continue;
-               if (err)
-                       break;
+       strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+               sizeof(data->wdinfo.identity));
+       data->wdinfo.firmware_version = revision;
+       data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+       if (!nowayout)
+               data->wdinfo.options |= WDIOF_MAGICCLOSE;
+
+       data->wddev.info = &data->wdinfo;
+       data->wddev.ops = &watchdog_ops;
+       data->wddev.parent = parent;
+       data->wddev.timeout = 60;
+       data->wddev.min_timeout = 1;
+       data->wddev.max_timeout = 255 * 60;
+       if (nowayout)
+               set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+       if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+               set_bit(WDOG_ACTIVE, &data->wddev.status);
+
+       /*
+        * Since the watchdog uses a downcounter there is no register to read
+        * the BIOS-set timeout from (if any was set at all), so choose a
+        * preset which will give us a 1 minute timeout.
+        */
+       if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+               data->watchdog_preset = 60; /* seconds */
+       else
+               data->watchdog_preset = 1; /* minute */
 
-               list_add(&data->list, &watchdog_data_list);
-               pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
-                       data->watchdog_name, watchdog_minors[i]);
-               break;
-       }
-       mutex_unlock(&watchdog_data_mutex);
+       data->watchdog_control = control;
+       data->watchdog_output_enable = output_enable;
 
+       watchdog_set_drvdata(&data->wddev, data);
+       err = watchdog_register_device(&data->wddev);
        if (err) {
                pr_err("Registering watchdog chardev: %d\n", err);
-               goto error;
-       }
-       if (i == ARRAY_SIZE(watchdog_minors)) {
-               pr_warn("Couldn't register watchdog (no free minor)\n");
-               goto error;
+               kfree(data);
+               return NULL;
        }
 
        return data;
-
-error:
-       kfree(data);
-       return NULL;
 }
 EXPORT_SYMBOL(sch56xx_watchdog_register);
 
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
 {
-       mutex_lock(&watchdog_data_mutex);
-       misc_deregister(&data->watchdog_miscdev);
-       list_del(&data->list);
-       mutex_unlock(&watchdog_data_mutex);
-
-       mutex_lock(&data->watchdog_lock);
-       if (data->watchdog_is_open) {
-               pr_warn("platform device unregistered with watchdog "
-                       "open! Stopping watchdog.\n");
-               watchdog_stop_unlocked(data);
-       }
-       /* Tell the wdog start/stop/trigger functions our dev is gone */
-       data->addr = 0;
-       data->io_lock = NULL;
-       mutex_unlock(&data->watchdog_lock);
-
-       mutex_lock(&watchdog_data_mutex);
-       kref_put(&data->kref, sch56xx_watchdog_release_resources);
-       mutex_unlock(&watchdog_data_mutex);
+       watchdog_unregister_device(&data->wddev);
+       kref_put(&data->kref, watchdog_release_resources);
+       /* Don't touch data after this, it may have been freed! */
 }
 EXPORT_SYMBOL(sch56xx_watchdog_unregister);
 
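
The hunks above replace the sch56xx driver's hand-rolled misc-device watchdog with the generic watchdog framework. As orientation for what the framework expects, here is a minimal sketch of that registration pattern; all demo_* names are invented, and only the watchdog-core calls mirror the code above.

    #include <linux/module.h>
    #include <linux/watchdog.h>

    static int demo_start(struct watchdog_device *wdd) { return 0; }
    static int demo_stop(struct watchdog_device *wdd)  { return 0; }
    static int demo_ping(struct watchdog_device *wdd)  { return 0; }

    static const struct watchdog_info demo_wdinfo = {
            .options  = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
            .identity = "demo watchdog",
    };

    static const struct watchdog_ops demo_wdops = {
            .owner = THIS_MODULE,
            .start = demo_start,
            .stop  = demo_stop,
            .ping  = demo_ping,
    };

    static struct watchdog_device demo_wddev = {
            .info        = &demo_wdinfo,
            .ops         = &demo_wdops,
            .timeout     = 60,
            .min_timeout = 1,
            .max_timeout = 255 * 60,
    };

    /*
     * probe():  watchdog_set_drvdata(&demo_wddev, priv);
     *           err = watchdog_register_device(&demo_wddev);
     * remove(): watchdog_unregister_device(&demo_wddev);
     * The /dev/watchdogN node, ioctls and magic-close handling are then
     * provided by the watchdog core instead of driver file_operations.
     */
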
index 7475086eb978e148f2e5c8039f7bedad2052b0ea..704ea2c6d28a772695d73000f1480032dcd649e8 100644 (file)
@@ -27,6 +27,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
 int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
                               int high_nibble);
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
        u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
index 61c9cf15fa52ecd50cb1a1b5421c8fef3666f24c..1201a15784c3a0eec329affa6f6edf1546df0b51 100644 (file)
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;
 
-               ret = hwspin_lock_register_single(hwlock, i);
+               ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 
 reg_failed:
        while (--i >= 0)
-               hwspin_lock_unregister_single(i);
+               hwspin_lock_unregister_single(base_id + i);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
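
The two one-line fixes above make hwspin_lock_register() hand the global lock ID (base_id + i) to the single-lock register/unregister helpers instead of the bank-local index. A hedged client-side sketch of why the global ID matters, with an invented base_id of 8 (so a 4-lock bank owns IDs 8..11):

    #include <linux/hwspinlock.h>

    /* Illustrative only: assumes some bank was registered with base_id = 8. */
    static int demo_take_lock(void)
    {
            struct hwspinlock *hwlock;
            int ret;

            hwlock = hwspin_lock_request_specific(9);   /* second lock of the bank */
            if (!hwlock)
                    return -EBUSY;

            ret = hwspin_lock_timeout(hwlock, 100);     /* timeout in msecs */
            if (!ret)
                    hwspin_unlock(hwlock);

            hwspin_lock_free(hwlock);
            return ret;
    }
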
index 7f0b83219744e52aa7cd68a7ac5ea6edc49537d1..fad22b0bb5b06fff58eb7f5fd49533fd841a7464 100644 (file)
@@ -608,7 +608,7 @@ bailout:
 
 static u32 bit_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+       return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
               I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
               I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
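
This hunk, and the nuc900/s3c24xx hunks that follow, advertise the new I2C_FUNC_NOSTART functionality bit for adapters that can issue messages without a repeated start. A client can then probe for the capability explicitly; a minimal sketch mirroring the as5011 joystick change further down (demo_check_adapter is an invented helper):

    #include <linux/i2c.h>

    /* Sketch: fail a probe early if the adapter cannot do no-start transfers. */
    static int demo_check_adapter(struct i2c_client *client)
    {
            if (!i2c_check_functionality(client->adapter,
                                         I2C_FUNC_NOSTART |
                                         I2C_FUNC_PROTOCOL_MANGLING))
                    return -ENODEV;
            return 0;
    }
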
index 03b61577888748a4d9a61cfe46b413eaaa219737..a26dfb8cd58690ce3df06c1c1685118899259a11 100644 (file)
@@ -502,7 +502,8 @@ static int nuc900_i2c_xfer(struct i2c_adapter *adap,
 /* declare our i2c functionality */
 static u32 nuc900_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+               I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
index fa0b134908731019a8490566fb7c0f6e4fa10f24..01959154572d88f0eb954327759fc66bcf9da5c8 100644 (file)
@@ -626,7 +626,8 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 /* declare our i2c functionality */
 static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+               I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
index 45048323b75eab01d5875cb05464207c2eb614b3..5ec2261574ec4fdb7e2b6fd3d8b3879c6ff187f0 100644 (file)
@@ -265,19 +265,41 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
 
        res = 0;
        for (i = 0; i < rdwr_arg.nmsgs; i++) {
-               /* Limit the size of the message to a sane amount;
-                * and don't let length change either. */
-               if ((rdwr_pa[i].len > 8192) ||
-                   (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+               /* Limit the size of the message to a sane amount */
+               if (rdwr_pa[i].len > 8192) {
                        res = -EINVAL;
                        break;
                }
+
                data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
                rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
                if (IS_ERR(rdwr_pa[i].buf)) {
                        res = PTR_ERR(rdwr_pa[i].buf);
                        break;
                }
+
+               /*
+                * If the message length is received from the slave (similar
+                * to SMBus block read), we must ensure that the buffer will
+                * be large enough to cope with a message length of
+                * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+                * drivers allow. The first byte in the buffer must be
+                * pre-filled with the number of extra bytes, which must be
+                * at least one to hold the message length, but can be
+                * greater (for example to account for a checksum byte at
+                * the end of the message.)
+                */
+               if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+                       if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+                           rdwr_pa[i].buf[0] < 1 ||
+                           rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+                                            I2C_SMBUS_BLOCK_MAX) {
+                               res = -EINVAL;
+                               break;
+                       }
+
+                       rdwr_pa[i].len = rdwr_pa[i].buf[0];
+               }
        }
        if (res < 0) {
                int j;
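
The i2c-dev hunk above drops the old blanket rejection of I2C_M_RECV_LEN and instead enforces the buffer rules spelled out in the new comment. From user space, a length-prefixed read through I2C_RDWR then looks roughly like the sketch below; the slave address 0x50, the command byte 0x00 and demo_block_read() itself are invented for illustration.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c.h>
    #include <linux/i2c-dev.h>

    static int demo_block_read(const char *dev_path)
    {
            __u8 cmd = 0x00;
            __u8 buf[I2C_SMBUS_BLOCK_MAX + 2] = { 1 };  /* buf[0] >= 1: room for the length byte */
            struct i2c_msg msgs[2] = {
                    { .addr = 0x50, .flags = 0, .len = 1, .buf = &cmd },
                    { .addr = 0x50, .flags = I2C_M_RD | I2C_M_RECV_LEN,
                      .len = sizeof(buf), .buf = buf }, /* len >= buf[0] + I2C_SMBUS_BLOCK_MAX */
            };
            struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };
            int fd = open(dev_path, O_RDWR);
            int ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, I2C_RDWR, &rdwr);
            close(fd);
            return ret < 0 ? -1 : buf[0];   /* buf[0] now holds the slave-reported length */
    }
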
index beb2491db274ade737e785931920c2ef2b10771b..a0edd98542189680073c04b933bf855d0f492aea 100644 (file)
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
          This driver can also be built as a module.  If so, the module
          will be called i2c-mux-pca954x.
 
+config I2C_MUX_PINCTRL
+       tristate "pinctrl-based I2C multiplexer"
+       depends on PINCTRL
+       help
+         If you say yes to this option, support will be included for an I2C
+         multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+         This is useful for SoCs whose I2C module's signals can be routed to
+         different sets of pins at run-time.
+
+         This driver can also be built as a module. If so, the module will be
+         called i2c-mux-pinctrl.
+
 endmenu
index 5826249b29ca4664f32a0f1e75e97bc70056b3c7..76da8692afff037074041747898202cbedced244 100644 (file)
@@ -4,5 +4,6 @@
 obj-$(CONFIG_I2C_MUX_GPIO)     += i2c-mux-gpio.o
 obj-$(CONFIG_I2C_MUX_PCA9541)  += i2c-mux-pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)  += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL)  += i2c-mux-pinctrl.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644 (file)
index 0000000..46a6697
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+       struct device *dev;
+       struct i2c_mux_pinctrl_platform_data *pdata;
+       struct pinctrl *pinctrl;
+       struct pinctrl_state **states;
+       struct pinctrl_state *state_idle;
+       struct i2c_adapter *parent;
+       struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+                                 u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+                                   u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                               struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       int num_names, i, ret;
+       struct device_node *adapter_np;
+       struct i2c_adapter *adapter;
+
+       if (!np)
+               return 0;
+
+       mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+       if (!mux->pdata) {
+               dev_err(mux->dev,
+                       "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+               return -ENOMEM;
+       }
+
+       num_names = of_property_count_strings(np, "pinctrl-names");
+       if (num_names < 0) {
+               dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                       num_names);
+               return num_names;
+       }
+
+       mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+               sizeof(*mux->pdata->pinctrl_states) * num_names,
+               GFP_KERNEL);
+       if (!mux->pdata->pinctrl_states) {
+               dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_names; i++) {
+               ret = of_property_read_string_index(np, "pinctrl-names", i,
+                       &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+               if (ret < 0) {
+                       dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                               ret);
+                       return ret;
+               }
+               if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+                           "idle")) {
+                       if (i != num_names - 1) {
+                               dev_err(mux->dev, "idle state must be last\n");
+                               return -EINVAL;
+                       }
+                       mux->pdata->pinctrl_state_idle = "idle";
+               } else {
+                       mux->pdata->bus_count++;
+               }
+       }
+
+       adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+       if (!adapter_np) {
+               dev_err(mux->dev, "Cannot parse i2c-parent\n");
+               return -ENODEV;
+       }
+       adapter = of_find_i2c_adapter_by_node(adapter_np);
+       if (!adapter) {
+               dev_err(mux->dev, "Cannot find parent bus\n");
+               return -ENODEV;
+       }
+       mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+       put_device(&adapter->dev);
+
+       return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                                          struct platform_device *pdev)
+{
+       return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux;
+       int (*deselect)(struct i2c_adapter *, void *, u32);
+       int i, ret;
+
+       mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux) {
+               dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       platform_set_drvdata(pdev, mux);
+
+       mux->dev = &pdev->dev;
+
+       mux->pdata = pdev->dev.platform_data;
+       if (!mux->pdata) {
+               ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+               if (ret < 0)
+                       goto err;
+       }
+       if (!mux->pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
+       mux->states = devm_kzalloc(&pdev->dev,
+                                  sizeof(*mux->states) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->states) {
+               dev_err(&pdev->dev, "Cannot allocate states\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->busses = devm_kzalloc(&pdev->dev,
+                                  sizeof(mux->busses) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->busses) {
+               dev_err(&pdev->dev, "Cannot allocate busses\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (IS_ERR(mux->pinctrl)) {
+               ret = PTR_ERR(mux->pinctrl);
+               dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+               goto err;
+       }
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_states[i]);
+               if (IS_ERR(mux->states[i])) {
+                       ret = PTR_ERR(mux->states[i]);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_states[i], ret);
+                       goto err;
+               }
+       }
+       if (mux->pdata->pinctrl_state_idle) {
+               mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_state_idle);
+               if (IS_ERR(mux->state_idle)) {
+                       ret = PTR_ERR(mux->state_idle);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_state_idle, ret);
+                       goto err;
+               }
+
+               deselect = i2c_mux_pinctrl_deselect;
+       } else {
+               deselect = NULL;
+       }
+
+       mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+       if (!mux->parent) {
+               dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+                       mux->pdata->parent_bus_num);
+               ret = -ENODEV;
+               goto err;
+       }
+
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               u32 bus = mux->pdata->base_bus_num ?
+                               (mux->pdata->base_bus_num + i) : 0;
+
+               mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+                                                    mux, bus, i,
+                                                    i2c_mux_pinctrl_select,
+                                                    deselect);
+               if (!mux->busses[i]) {
+                       ret = -ENODEV;
+                       dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+                       goto err_del_adapter;
+               }
+       }
+
+       return 0;
+
+err_del_adapter:
+       for (; i > 0; i--)
+               i2c_del_mux_adapter(mux->busses[i - 1]);
+       i2c_put_adapter(mux->parent);
+err:
+       return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < mux->pdata->bus_count; i++)
+               i2c_del_mux_adapter(mux->busses[i]);
+
+       i2c_put_adapter(mux->parent);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+       { .compatible = "i2c-mux-pinctrl", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+       .driver = {
+               .name   = "i2c-mux-pinctrl",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+       },
+       .probe  = i2c_mux_pinctrl_probe,
+       .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
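
For boards without device tree, the new driver is configured through the platform data it dereferences above (via the <linux/i2c-mux-pinctrl.h> header it includes). A hedged board-code sketch follows; the field names are taken from the driver's own accesses, but the exact field types, the state names and the bus numbers are assumptions.

    #include <linux/kernel.h>
    #include <linux/i2c-mux-pinctrl.h>

    /* Board-code sketch; state names and bus numbers are invented. */
    static const char *demo_mux_states[] = { "i2c-grp-a", "i2c-grp-b" };

    static struct i2c_mux_pinctrl_platform_data demo_mux_pdata = {
            .parent_bus_num     = 0,                          /* adapter the mux hangs off */
            .base_bus_num       = 10,                         /* children become i2c-10, i2c-11 */
            .bus_count          = ARRAY_SIZE(demo_mux_states),
            .pinctrl_states     = demo_mux_states,
            .pinctrl_state_idle = "idle",                     /* optional; enables deselect() */
    };

With base_bus_num left at 0 the child adapters would get dynamic numbers instead, as the probe code above shows.
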
index 8716066a2f2b79c1ddeb1236a9b2091d7bd62f63..bcb507b0cfd4c22f4b9b988ac9c789f3e27dc3cf 100644 (file)
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
  */
 static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 {
-       unsigned long cycle_time;
+       unsigned long cycle_time = 0;
        int use_dma_info = 0;
        const u8 xfer_mode = drive->dma_mode;
 
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 
        ide_set_drivedata(drive, (void *)cycle_time);
 
-       printk("%s: %s selected (peak %dMB/s)\n", drive->name,
-               ide_xfer_verbose(xfer_mode),
-               2000 / (unsigned long)ide_get_drivedata(drive));
+       printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+              drive->name, ide_xfer_verbose(xfer_mode),
+              2000 / (cycle_time ? cycle_time : (unsigned long) -1));
 }
 
 static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
        .dma_test_irq           = icside_dma_test_irq,
        .dma_lost_irq           = ide_dma_lost_irq,
 };
-#else
-#define icside_v6_dma_ops NULL
 #endif
 
 static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
 static const struct ide_port_info icside_v6_port_info __initdata = {
        .init_dma               = icside_dma_off_init,
        .port_ops               = &icside_v6_no_dma_port_ops,
-       .dma_ops                = &icside_v6_dma_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
        .mwdma_mask             = ATA_MWDMA2,
        .swdma_mask             = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 
        ecard_set_drvdata(ec, state);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
        if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
                d.init_dma = icside_dma_init;
                d.port_ops = &icside_v6_port_ops;
-       } else
-               d.dma_ops = NULL;
+               d.dma_ops  = &icside_v6_dma_ops;
+       }
+#endif
 
        ret = ide_host_register(host, &d, hws);
        if (ret)
index 28e344ea514cc18ba8060c6cb6d6d2beb0a2a70b..f1e922e2479af988f7da82b838a1b337414cd8f2 100644 (file)
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
 {
        int *is_kme = priv_data;
 
-       if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+       if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+           != IO_DATA_PATH_WIDTH_8) {
                pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
                pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
        }
index 56eecefcec7581743c274912446b7cec052ef9cd..2ec93da41e2c49164b5f905572fa6f314edf1968 100644 (file)
@@ -8,8 +8,7 @@ menuconfig IIO
        help
          The industrial I/O subsystem provides a unified framework for
          drivers for many different types of embedded sensors using a
-         number of different physical interfaces (i2c, spi, etc). See
-         Documentation/iio for more information.
+         number of different physical interfaces (i2c, spi, etc).
 
 if IIO
 
index 1ddd8861c71b543f449ddc5794af2db5a6f0c241..4f947e4377eff4fb6cfbec8d29fd18230e446555 100644 (file)
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
         * New channel registration method - relies on the fact a group does
         * not need to be initialized if its name is NULL.
         */
-       INIT_LIST_HEAD(&indio_dev->channel_attr_list);
        if (indio_dev->channels)
                for (i = 0; i < indio_dev->num_channels; i++) {
                        ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(device);
-       cdev_del(&indio_dev->chrdev);
+       if (indio_dev->chrdev.dev)
+               cdev_del(&indio_dev->chrdev);
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                iio_device_unregister_trigger_consumer(indio_dev);
        iio_device_unregister_eventset(indio_dev);
        iio_device_unregister_sysfs(indio_dev);
        iio_device_unregister_debugfs(indio_dev);
+
+       ida_simple_remove(&iio_ida, indio_dev->id);
+       kfree(indio_dev);
 }
 
 static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
                dev_set_drvdata(&dev->dev, (void *)dev);
                mutex_init(&dev->mlock);
                mutex_init(&dev->info_exist_lock);
+               INIT_LIST_HEAD(&dev->channel_attr_list);
 
                dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
                if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
 
 void iio_device_free(struct iio_dev *dev)
 {
-       if (dev) {
-               ida_simple_remove(&iio_ida, dev->id);
-               kfree(dev);
-       }
+       if (dev)
+               put_device(&dev->dev);
 }
 EXPORT_SYMBOL(iio_device_free);
 
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
        mutex_lock(&indio_dev->info_exist_lock);
        indio_dev->info = NULL;
        mutex_unlock(&indio_dev->info_exist_lock);
-       device_unregister(&indio_dev->dev);
+       device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
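
The IIO hunks above move the final kfree() and ida_simple_remove() into the struct device release callback, switch iio_device_unregister() to device_del(), and turn iio_device_free() into a put_device(), so the iio_dev stays usable between unregister and free. The driver-visible pairing is unchanged; a minimal sketch, with invented demo_* names and the header path assumed to be <linux/iio/iio.h>:

    #include <linux/device.h>
    #include <linux/iio/iio.h>

    struct demo_state { int dummy; };       /* invented driver state */

    static int demo_setup(struct device *dev)
    {
            struct iio_dev *indio_dev;
            int ret;

            indio_dev = iio_device_alloc(sizeof(struct demo_state));
            if (!indio_dev)
                    return -ENOMEM;

            /* ... fill indio_dev->name, ->info, ->channels, ->num_channels ... */

            ret = iio_device_register(indio_dev);
            if (ret)
                    iio_device_free(indio_dev);     /* now just a put_device() */
            return ret;
    }

    static void demo_teardown(struct iio_dev *indio_dev)
    {
            iio_device_unregister(indio_dev);       /* device_del(): data stays valid */
            /* ... driver-private cleanup that still needs indio_dev ... */
            iio_device_free(indio_dev);             /* last put: release() frees id + memory */
    }
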
index 55d5642eb10ada70ec2f87419e5909ae52906323..2e826f9702c6d76cfc895c24466c068a8e042a83 100644 (file)
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 
 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 {
-       return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+       return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
index 55ab284e22f2c02c037ca67825d24e3d8a827208..b18870c455adde5546ec90a4bf356813bbb34432 100644 (file)
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
                struct net_device *pdev;
 
                pdev = ip_dev_find(&init_net, peer_ip);
+               if (!pdev) {
+                       err = -ENODEV;
+                       goto out;
+               }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, pdev, 0);
                if (!ep->l2t)
index ee1c577238f7a261c4773c49372ad6ff2c304c0d..3530c41fcd1f28036f0c0a7d951e811bf77752d9 100644 (file)
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
-       props->max_qp_wr           = dev->dev->caps.max_wqes;
+       props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_sge             = min(dev->dev->caps.max_sq_sg,
                                         dev->dev->caps.max_rq_sg);
        props->max_cq              = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
        int total_eqs = 0;
        int i, j, eq;
 
-       /* Init eq table */
-       ibdev->eq_table = NULL;
-       ibdev->eq_added = 0;
-
-       /* Legacy mode? */
-       if (dev->caps.comp_pool == 0)
+       /* Legacy mode or comp_pool is not large enough */
+       if (dev->caps.comp_pool == 0 ||
+           dev->caps.num_ports > dev->caps.comp_pool)
                return;
 
        eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
        int i;
-       int total_eqs;
+
+       /* no additional eqs were added */
+       if (!ibdev->eq_table)
+               return;
 
        /* Reset the advertised EQ number */
        ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
                mlx4_release_eq(dev, ibdev->eq_table[i]);
        }
 
-       total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
-       memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
        kfree(ibdev->eq_table);
-
-       ibdev->eq_table = NULL;
-       ibdev->eq_added = 0;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
index e62297cc77cc24396d3460498720508dd3e0fde0..ff36655d23d387953fd9a1fc1b7fb8ffa1bf1153 100644 (file)
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
+enum {
+       MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+       MLX4_IB_MAX_HEADROOM     = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift)     ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE           (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
+
 struct mlx4_ib_ucontext {
        struct ib_ucontext      ibucontext;
        struct mlx4_uar         uar;
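
Plugging in the constants just defined: MLX4_IB_SQ_MAX_SPARE = MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 32 + 1 = 33 work-queue entries, which is the fixed reserve the neighbouring mlx4 hunks subtract from caps.max_wqes when reporting max_qp_wr and when sanity-checking requested SQ/RQ sizes.
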
index ceb33327091a8244a8d16d73766c5c2eab3840c1..8d4ed24aef931e1e89186a9a5cdfccbfaf268771 100644 (file)
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                       int is_user, int has_rq, struct mlx4_ib_qp *qp)
 {
        /* Sanity check RQ size before proceeding */
-       if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
-           cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+       if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+           cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
                return -EINVAL;
 
        if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
        }
 
-       cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
-       cap->max_recv_sge = qp->rq.max_gs;
+       /* leave userspace return values as they were, so as not to break ABI */
+       if (is_user) {
+               cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
+               cap->max_recv_sge = qp->rq.max_gs;
+       } else {
+               cap->max_recv_wr  = qp->rq.max_post =
+                       min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+               cap->max_recv_sge = min(qp->rq.max_gs,
+                                       min(dev->dev->caps.max_sq_sg,
+                                           dev->dev->caps.max_rq_sg));
+       }
 
        return 0;
 }
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
        int s;
 
        /* Sanity check SQ size before proceeding */
-       if (cap->max_send_wr     > dev->dev->caps.max_wqes  ||
-           cap->max_send_sge    > dev->dev->caps.max_sq_sg ||
+       if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+           cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
            cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
            sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
                return -EINVAL;
index 85a69c958559b8a1aedf7bab27d3ad2097268656..48970af236794cea69ace19502a874071c1ee0cb 100644 (file)
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
        u32 max_inline_data;
        int max_send_sge;
        int max_recv_sge;
+       int max_srq_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
        u32 entry_size;
        u32 max_cnt;
        u32 max_wqe_idx;
-       u32 free_delta;
        u16 dbid;               /* qid, where to ring the doorbell. */
        u32 len;
        dma_addr_t pa;
index a411a4e3193d33dd7b113210868e1d754ea26af8..517ab20b727c51feac5314a64e717adfdedacde4 100644 (file)
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
        u32 rsvd1;
        u32 num_wqe_allocated;
        u32 num_rqe_allocated;
-       u32 free_wqe_delta;
-       u32 free_rqe_delta;
        u32 db_sq_offset;
        u32 db_rq_offset;
        u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
        u32 db_rq_offset;
        u32 db_shift;
 
-       u32 free_rqe_delta;
-       u32 rsvd2;
+       u64 rsvd2;
        u64 rsvd3;
 } __packed;
 
index 9b204b1ba3366d624bfc01213eb122179e295310..71942af4fce94695753dacbb8417ee191aaaf9a6 100644 (file)
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                break;
        case OCRDMA_SRQ_LIMIT_EVENT:
                ib_evt.element.srq = &qp->srq->ibsrq;
-               ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+               ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
                srq_event = 1;
                qp_event = 0;
                break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
                              struct ocrdma_dev_attr *attr,
                              struct ocrdma_mbx_query_config *rsp)
 {
-       int max_q_mem;
-
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_recv_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+       attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_inline_data =
            attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                              sizeof(struct ocrdma_sge));
-       max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
-       /* hw can queue one less then the configured size,
-        * so publish less by one to stack.
-        */
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-               dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
                attr->ird = 1;
                attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
-       } else
-               dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
-       dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+       }
+       dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+       dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
 }
 
 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
        max_wqe_allocated = 1 << max_wqe_allocated;
        max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
 
-       if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-               qp->sq.free_delta = 0;
-               qp->rq.free_delta = 1;
-       } else
-               qp->sq.free_delta = 1;
-
        qp->sq.max_cnt = max_wqe_allocated;
        qp->sq.max_wqe_idx = max_wqe_allocated - 1;
 
        if (!attrs->srq) {
                qp->rq.max_cnt = max_rqe_allocated;
                qp->rq.max_wqe_idx = max_rqe_allocated - 1;
-               qp->rq.free_delta = 1;
        }
 }
 
index a20d16eaae71ae5e1bb4da23979db0f26a287f3c..b050e629e9c3bd19d2e089fdd7792a53d49fe2b0 100644 (file)
@@ -26,7 +26,6 @@
  *******************************************************************/
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/idr.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
        sgid->raw[15] = mac_addr[5];
 }
 
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                            bool is_vlan, u16 vlan_id)
 {
        int i;
-       bool found = false;
        union ib_gid new_sgid;
-       int free_idx = OCRDMA_MAX_SGID;
        unsigned long flags;
 
        memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
                            sizeof(union ib_gid))) {
                        /* found free entry */
-                       if (!found) {
-                               free_idx = i;
-                               found = true;
-                               break;
-                       }
+                       memcpy(&dev->sgid_tbl[i], &new_sgid,
+                              sizeof(union ib_gid));
+                       spin_unlock_irqrestore(&dev->sgid_lock, flags);
+                       return true;
                } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
                                   sizeof(union ib_gid))) {
                        /* entry already present, no addition is required. */
                        spin_unlock_irqrestore(&dev->sgid_lock, flags);
-                       return;
+                       return false;
                }
        }
-       /* if entry doesn't exist and if table has some space, add entry */
-       if (found)
-               memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
-                      sizeof(union ib_gid));
        spin_unlock_irqrestore(&dev->sgid_lock, flags);
+       return false;
 }
 
 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
        ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
        struct net_device *netdev, *tmp;
        u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 
        netdev = dev->nic_info.netdev;
 
-       ocrdma_add_default_sgid(dev);
-
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, tmp) {
                if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
                }
        }
        rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+       ocrdma_add_default_sgid(dev);
+       ocrdma_add_vlan_sgids(dev);
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
                                  unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        struct ib_event gid_event;
        struct ocrdma_dev *dev;
        bool found = false;
+       bool updated = false;
        bool is_vlan = false;
        u16 vid = 0;
 
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        mutex_lock(&dev->dev_lock);
        switch (event) {
        case NETDEV_UP:
-               ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+               updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        case NETDEV_DOWN:
-               found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
-               if (found) {
-                       /* found the matching entry, notify
-                        * the consumers about it
-                        */
-                       gid_event.device = &dev->ibdev;
-                       gid_event.element.port_num = 1;
-                       gid_event.event = IB_EVENT_GID_CHANGE;
-                       ib_dispatch_event(&gid_event);
-               }
+               updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        default:
                break;
        }
+       if (updated) {
+               /* GID table updated, notify the consumers about it */
+               gid_event.device = &dev->ibdev;
+               gid_event.element.port_num = 1;
+               gid_event.event = IB_EVENT_GID_CHANGE;
+               ib_dispatch_event(&gid_event);
+       }
        mutex_unlock(&dev->dev_lock);
        return NOTIFY_OK;
 }
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
        .notifier_call = ocrdma_inet6addr_event
 };
 
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
 
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
                                              u8 port_num)
index 7fd80cc0f0374159d5b9de9492bc6bfc64a838e4..c75cbdfa87e7b82a3e3d6cd5d1e7be78b4dd1f19 100644 (file)
@@ -418,6 +418,9 @@ enum {
 
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK         = 0xFFFF <<
+                               OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
 
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT       = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK        = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
                                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET     = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK       = 0xFFFF <<
-                               OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+                               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
 
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET              = 16,
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK                = 0xFFFF <<
index e9f74d1b48f639f9d4ae6ff31cff0f0fa58bd173..2e2e7aecc9907a87432dce09b218e046a141c124 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY;
-       attr->max_sge = dev->attr.max_send_sge;
-       attr->max_sge_rd = dev->attr.max_send_sge;
+       attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+       attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = (dev->attr.max_qp - 1);
-       attr->max_srq_sge = attr->max_sge;
+       attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
                uresp.db_shift = 16;
        }
-       uresp.free_wqe_delta = qp->sq.free_delta;
-       uresp.free_rqe_delta = qp->rq.free_delta;
 
        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
                free_cnt = (q->max_cnt - q->head) + q->tail;
        else
                free_cnt = q->tail - q->head;
-       if (q->free_delta)
-               free_cnt -= q->free_delta;
        return free_cnt;
 }
 
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
            (srq->pd->id * srq->dev->nic_info.db_page_size);
        uresp.db_page_size = srq->dev->nic_info.db_page_size;
        uresp.num_rqe_allocated = srq->rq.max_cnt;
-       uresp.free_rqe_delta = 1;
        if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
                uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                        *stop = true;
                        expand = false;
                }
-       } else
+       } else {
+               *polled = true;
                expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+       }
        return expand;
 }
 
index e6483439f25f0dc6d1a2fac2b4d15e53b9606cb5..633f03d802746908bcb4f7a58adef800b024ce2f 100644 (file)
@@ -28,7 +28,6 @@
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
 
-#include <linux/version.h>
 int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
                     struct ib_send_wr **bad_wr);
 int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
index 5c1bc995e5603ced273bc4731ff67ff88eaf6f76..f10221f40803959198a3b85e21b8b57ce01ea60a 100644 (file)
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 
                skb_frag_size_set(frag, size);
                skb->data_len += size;
-               skb->truesize += size;
+               skb->truesize += PAGE_SIZE;
        } else
                skb_put(skb, length);
 
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
+       int tailroom;
        u64 *mapping;
 
-       if (ipoib_ud_need_sg(priv->max_ib_mtu))
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                buf_size = IPOIB_UD_HEAD_SIZE;
-       else
+               tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+       } else {
                buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+               tailroom = 0;
+       }
 
-       skb = dev_alloc_skb(buf_size + 4);
+       skb = dev_alloc_skb(buf_size + tailroom + 4);
        if (unlikely(!skb))
                return NULL;
 
index 3063464474bf637e1a65d8dd52c144bb95ffba1d..c96653b58867deb406a13913774a7e35c074dec7 100644 (file)
@@ -231,6 +231,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
        }
 
        if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_NOSTART |
                                     I2C_FUNC_PROTOCOL_MANGLING)) {
                dev_err(&client->dev,
                        "need i2c bus that supports protocol mangling\n");
@@ -281,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->button_irq,
                                     NULL, as5011_button_interrupt,
-                                    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     "as5011_button", as5011);
        if (error < 0) {
                dev_err(&client->dev,
@@ -295,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->axis_irq, NULL,
                                     as5011_axis_interrupt,
-                                    plat_data->axis_irqflags,
+                                    plat_data->axis_irqflags | IRQF_ONESHOT,
                                     "as5011_joystick", as5011);
        if (error) {
                dev_err(&client->dev,
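
This hunk and the keyboard/misc input hunks that follow all add IRQF_ONESHOT to request_threaded_irq() calls that pass a NULL primary handler; the genirq core rejects such requests unless the line is kept masked until the thread finishes, which is exactly what IRQF_ONESHOT asks for. A minimal sketch of the resulting pattern (demo_* names invented):

    #include <linux/interrupt.h>

    /* No primary handler, so the line must stay masked until the thread
     * completes: IRQF_ONESHOT is mandatory for this style of request. */
    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
            /* ... talk to the (sleeping-bus) device here ... */
            return IRQ_HANDLED;
    }

    static int demo_request(int irq, void *dev_id)
    {
            return request_threaded_irq(irq, NULL, demo_thread_fn,
                                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                        "demo", dev_id);
    }
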
index ee16fb67b7ae235bd9469bc50b9b0f588b99a2e4..83811e45d6339b013cc4f9cc16b90c5c66ba8af8 100644 (file)
@@ -142,6 +142,7 @@ static const struct xpad_device {
        { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
        { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
        { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
        { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
        { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+       { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
 };
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harmonix Rock Band Guitar and Drums */
-       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        { }
 };
 
index 64a0ca4c92f3376562fe07c8f4855ce889417b1d..0d77f6c84950f7d921e03ae0e078a3c788b72bb6 100644 (file)
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
        }
 
        error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
-                       IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_mem;
index caa218a51b5ac94bd89cade097a41cd926251dcd..7613f1cac9517c1ecf30e18f2946c872c3e25252 100644 (file)
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
 
        error = request_threaded_irq(client->irq, NULL,
                                     mpr_touchkey_interrupt,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->dev.driver->name, mpr121);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
index 0b7b2f891752061a50ab92745e67e8025c146523..ca68f2992d7292ebdebafc0d6947e201448fc623 100644 (file)
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
        msleep(QT1070_RESET_TIME);
 
        err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
-               IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+                                  IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+                                  client->dev.driver->name, data);
        if (err) {
                dev_err(&client->dev, "fail to request irq\n");
                goto err_free_mem;
index 3afea3f897182adba5c1439dc38e7a34126fa440..c355cdde8d223e0f7184c297df95d41f3048fb43 100644 (file)
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
 
                error = request_threaded_irq(chip->irqnum, NULL,
                                             tca6416_keys_isr,
-                                            IRQF_TRIGGER_FALLING,
+                                            IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
                                             "tca6416-keypad", chip);
                if (error) {
                        dev_dbg(&client->dev,
index 5f87b28b31920b92caf1644413c8cab7fe4da66a..893869b29ed9895c0f1998fbb2068ce7a41ef402 100644 (file)
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
                client->irq = gpio_to_irq(client->irq);
 
        error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, keypad_data);
        if (error) {
                dev_dbg(&client->dev,
index a4a445fb7020bcf1df98d1a36e958c7e70d83b39..4c34f21fbe2dff4a8b823e48dd96fb4635a81d16 100644 (file)
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad press key irq\n");
                goto error_irq_press;
        }
 
-       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad release key irq\n");
                goto error_irq_release;
index 0ac75bbad4d69d61e64719d1a2550778f121f599..2e5d5e1de64787f17c2349e1a3c575144411518f 100644 (file)
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
        struct ad714x_platform_data *plat_data = dev->platform_data;
        struct ad714x_chip *ad714x;
        void *drv_mem;
+       unsigned long irqflags;
 
        struct ad714x_button_drv *bt_drv;
        struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
                alloc_idx++;
        }
 
+       irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+       irqflags |= IRQF_ONESHOT;
+
        error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
-                               plat_data->irqflags ?
-                                       plat_data->irqflags : IRQF_TRIGGER_FALLING,
-                               "ad714x_captouch", ad714x);
+                                    irqflags, "ad714x_captouch", ad714x);
        if (error) {
                dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
                goto err_unreg_dev;
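
The ad714x change uses GCC's "a ?: b" extension (a conditional expression with the middle operand omitted): it evaluates to a when a is non-zero, otherwise to b, and evaluates a only once. Behaviour is unchanged from the removed ternary; the flags are simply computed up front so IRQF_ONESHOT can be OR-ed in. A tiny self-contained illustration, with arbitrary stand-in values for the IRQF_* bits:

    #include <stdio.h>

    int main(void)
    {
            unsigned long platform_flags = 0;   /* nothing specified by the board */
            unsigned long fallback = 0x2;       /* stand-in for IRQF_TRIGGER_FALLING */
            unsigned long oneshot  = 0x2000;    /* stand-in for IRQF_ONESHOT */

            /* GNU extension: (a ?: b) == (a ? a : b), with a evaluated once. */
            unsigned long irqflags = (platform_flags ?: fallback) | oneshot;

            printf("irqflags = %#lx\n", irqflags);   /* prints irqflags = 0x2002 */
            return 0;
    }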
index 35083c6836c351ba5110f6fe3ec0cecc98b074e9..c1313d8535c349993f0bb64ba3940ffe3e32b1d2 100644 (file)
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
        /* REVISIT:  flush the event queue? */
 
        status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
-                       IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+                                     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                     dev_name(&pdev->dev), keys);
        if (status < 0)
                goto fail2;
 
index 2cf681d98c0d2d6433a6f8d11c502935d40c202a..d528c23e194f6efcbe7efbe8651d218aa6ef7965 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+       /* MacbookPro10,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
        /* Terminating entry */
        {}
 };
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+       },
        {}
 };
 
index cad5602d3ce45a525ca70d9cc1cf7f766348c24e..8b31473a81fe9bd6056dd95363405f0e7610c4ee 100644 (file)
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
 
                rep_data[0] = 12;
                result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
-                                         rep_data[0], &rep_data, 2,
+                                         rep_data[0], rep_data, 2,
                                          WAC_MSG_RETRIES);
 
                if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                break;
 
                        case HID_USAGE_CONTACTMAX:
-                               wacom_retrieve_report_data(intf, features);
+                               /* leave touch_max as is if predefined */
+                               if (!features->touch_max)
+                                       wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
                        }
index e2482b40da5198fdb1406e135fff827d049e5895..bd4eb42776973b211aee79e40f51b47e2f5e8d27 100644 (file)
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
                        AD7879_TMR(ts->pen_down_acc_interval);
 
        err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
-                                  IRQF_TRIGGER_FALLING,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                   dev_name(dev), ts);
        if (err) {
                dev_err(dev, "irq %d busy?\n", ts->irq);
index 42e645062c208e842b6feb5ecaf6824004c49a0c..25fd0561a17d2f1afe83a84c28864e81510acbba 100644 (file)
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
                goto err_free_object;
 
        error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
-                       pdata->irqflags, client->dev.driver->name, data);
+                                    pdata->irqflags | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_object;
index f2d03c06c2da6660c30f64c177164dafb432385f..5c487d23f11cbaedd29e65c5143ba611a90e7d78 100644 (file)
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        input_set_drvdata(in_dev, bu21013_data);
 
        error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
-                                    IRQF_TRIGGER_FALLING | IRQF_SHARED,
+                                    IRQF_TRIGGER_FALLING | IRQF_SHARED |
+                                       IRQF_ONESHOT,
                                     DRIVER_TP, bu21013_data);
        if (error) {
                dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
index 237753ad10318b9fab24b6d559448ea41d492480..464f1bf4b61dcbf62af90196ba28608b6896e6f8 100644 (file)
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
        }
 
        err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
-                                  IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "touch_reset_key", ts);
        if (err < 0) {
                dev_err(&client->dev,
                        "irq %d busy? error %d\n", client->irq, err);
index 3cd7a837f82b203f41f5ff1aa7cefcf5b3714594..cf299377fc4980e54c02a0bfeb25c41ae2fbc5ec 100644 (file)
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
                             MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
 
        err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
-                                  0, "mrstouch", tsdev);
+                                  IRQF_ONESHOT, "mrstouch", tsdev);
        if (err) {
                dev_err(tsdev->dev, "unable to allocate irq\n");
                goto err_free_mem;
index 72f6ba3a470937d2528207d20c53d182a0b7803f..953b4c105cad75ead98062869d7a6c0cc5237068 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
        input_set_drvdata(input, tsdata);
 
        error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, tsdata);
        if (error) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
index 7e74880973591bb61daa6e3317a94e43bb8f5415..368d2c6cf780cb0e9a1ffad750bace5a5d6b6e6c 100644 (file)
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
                                     dev_name(dev), ts);
        if (error < 0) {
                dev_err(ts->dev, "Could not allocate ts irq\n");
index b6adeaee9cc5f0f226781f59ba79961fd9eb1e0b..5ce3fa8ce6465e049dd415b784eaafe7e0fbf7ed 100644 (file)
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
        tsc2005_stop_scan(ts);
 
        error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
-                                    IRQF_TRIGGER_RISING, "tsc2005", ts);
+                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                    "tsc2005", ts);
        if (error) {
                dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
                goto err_free_mem;
index d90a421e9caccbbfc38a3d2eb4dd20ef2a50c77b..625626391f2d39d3802710a1677ac49ef3181c9a 100644 (file)
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
                return;
 
        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-                                        (u32 *)&amd_iommu_unmap_flush);
+                                        &amd_iommu_unmap_flush);
 
        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
@@ -547,26 +549,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
        struct amd_iommu_fault fault;
-       volatile u64 *raw;
-       int i;
 
        INC_STATS_COUNTER(pri_requests);
 
-       raw = (u64 *)(iommu->ppr_log + head);
-
-       /*
-        * Hardware bug: Interrupt may arrive before the entry is written to
-        * memory. If this happens we need to wait for the entry to arrive.
-        */
-       for (i = 0; i < LOOP_TIMEOUT; ++i) {
-               if (PPR_REQ_TYPE(raw[0]) != 0)
-                       break;
-               udelay(1);
-       }
-
        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
                return;
@@ -578,12 +566,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
        fault.tag       = PPR_TAG(raw[0]);
        fault.flags     = PPR_FLAGS(raw[0]);
 
-       /*
-        * To detect the hardware bug we need to clear the entry
-        * to back to zero.
-        */
-       raw[0] = raw[1] = 0;
-
        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
 
@@ -595,25 +577,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
        if (iommu->ppr_log == NULL)
                return;
 
+       /* enable ppr interrupts again */
+       writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
        spin_lock_irqsave(&iommu->lock, flags);
 
        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
        while (head != tail) {
+               volatile u64 *raw;
+               u64 entry[2];
+               int i;
 
-               /* Handle PPR entry */
-               iommu_handle_ppr_entry(iommu, head);
+               raw = (u64 *)(iommu->ppr_log + head);
+
+               /*
+                * Hardware bug: Interrupt may arrive before the entry is
+                * written to memory. If this happens we need to wait for the
+                * entry to arrive.
+                */
+               for (i = 0; i < LOOP_TIMEOUT; ++i) {
+                       if (PPR_REQ_TYPE(raw[0]) != 0)
+                               break;
+                       udelay(1);
+               }
 
-               /* Update and refresh ring-buffer state*/
+               /* Avoid memcpy function-call overhead */
+               entry[0] = raw[0];
+               entry[1] = raw[1];
+
+               /*
+                * To detect the hardware bug we need to clear the entry
+                * back to zero.
+                */
+               raw[0] = raw[1] = 0UL;
+
+               /* Update head pointer of hardware ring-buffer */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+               /*
+                * Release iommu->lock because ppr-handling might need to
+                * re-acquire it
+                */
+               spin_unlock_irqrestore(&iommu->lock, flags);
+
+               /* Handle PPR entry */
+               iommu_handle_ppr_entry(iommu, entry);
+
+               spin_lock_irqsave(&iommu->lock, flags);
+
+               /* Refresh ring-buffer information */
+               head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
 
-       /* enable ppr interrupts again */
-       writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -2250,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
+               dev_data = get_dev_data(dev);
+
+               if (!dev_data->passthrough)
+                       dev->archdata.dma_ops = &amd_iommu_dma_ops;
+               else
+                       dev->archdata.dma_ops = &nommu_dma_ops;
+
                break;
        case BUS_NOTIFY_DEL_DEVICE:
 
index c56790375e0fd25d66c9b09cb3cfa694fae05acc..a33612f3206f25f1146df2c84b254b372b306382 100644 (file)
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf;                     /* largest PCI device id we have
                                           to handle */
 LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
                                           we find in ACPI */
-bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush;             /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
                                           system */
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
        if (!iommu->dev)
                return 1;
 
+       iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+                                               PCI_DEVFN(0, 0));
+
        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
 {
        int i, j;
        u32 ioc_feature_control;
-       struct pci_dev *pdev = NULL;
+       struct pci_dev *pdev = iommu->root_pdev;
 
        /* RD890 BIOSes may not have completely reconfigured the iommu */
-       if (!is_rd890_iommu(iommu->dev))
+       if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;
 
        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */
-       pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
-       if (!pdev)
-               return;
 
        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
 
-       pci_dev_put(pdev);
-
        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
@@ -1644,6 +1641,8 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_api();
 
+       x86_platform.iommu_shutdown = disable_iommus;
+
        if (iommu_pass_through)
                goto out;
 
@@ -1652,8 +1651,6 @@ static int __init amd_iommu_init(void)
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
-       x86_platform.iommu_shutdown = disable_iommus;
-
 out:
        return ret;
 
index 2452f3b7173619c449c04f025e2e2ff73cc44f29..c1b1d489817e2b667edbbeb95d36eae1d503a994 100644 (file)
@@ -481,6 +481,9 @@ struct amd_iommu {
        /* Pointer to PCI device of this IOMMU */
        struct pci_dev *dev;
 
+       /* Cache pdev to root device for resume quirks */
+       struct pci_dev *root_pdev;
+
        /* physical address of MMIO space */
        u64 mmio_phys;
        /* virtual address of MMIO space */
@@ -649,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
 
 /* Smallest number of PASIDs supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasids;
index ecd679043d7740e6883aae9cbee68da6321fedc1..3f3d09d560ea3c6ce5e3bc7d019e377ac9e63dfe 100644 (file)
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
                return 0;
 
        as->pte_count = devm_kzalloc(smmu->dev,
-                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
        if (!as->pte_count) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device PTE cunters\n");
                return -ENOMEM;
        }
-       as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
        if (!as->pdir_page) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device page directory\n");
index 1a0ae4445ff2b47c4202359e6bba0b0f5bf8f336..5f21f629b7aebeac0163083a2a83a50f4e53c1a8 100644 (file)
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
                        skb = NULL;
                else if (*debug & DEBUG_SEND_ERR)
                        printk(KERN_DEBUG
-                              "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-                              __func__, ch->nr, hh->prim, ch->addr, ret);
+                              "%s mgr prim(%x) err %d\n",
+                              __func__, hh->prim, ret);
        }
 out:
        mutex_unlock(&st->lmutex);
index 04cb8c88d74b7678d12389898ba0702cf0c17b55..12b2b55c519e7b3038677edfd9c1b4f710493f4c 100644 (file)
@@ -379,7 +379,7 @@ config LEDS_NETXBIG
 
 config LEDS_ASIC3
        bool "LED support for the HTC ASIC3"
-       depends on LEDS_CLASS
+       depends on LEDS_CLASS=y
        depends on MFD_ASIC3
        default y
        help
@@ -390,7 +390,7 @@ config LEDS_ASIC3
 
 config LEDS_RENESAS_TPU
        bool "LED support for Renesas TPU"
-       depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+       depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
        help
          This option enables build of the LED TPU platform driver,
          suitable to drive any TPU channel on newer Renesas SoCs.
index 8ee92c81aec2c1577c3fb1cfd173d8835c2c7725..e663e6f413e989d835067811a992bb62dc316f36 100644 (file)
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
                led_cdev->brightness = led_cdev->brightness_get(led_cdev);
 }
 
-static ssize_t led_brightness_show(struct device *dev, 
+static ssize_t led_brightness_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
index d6860043f6f99f1fc0e4f819681d8fed9089c542..d65353d8d3fcb4ae012460c20bc33f20c2afa84f 100644 (file)
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
        if (!led_cdev->blink_brightness)
                led_cdev->blink_brightness = led_cdev->max_brightness;
 
-       if (led_get_trigger_data(led_cdev) &&
-           delay_on == led_cdev->blink_delay_on &&
-           delay_off == led_cdev->blink_delay_off)
-               return;
-
-       led_stop_software_blink(led_cdev);
-
        led_cdev->blink_delay_on = delay_on;
        led_cdev->blink_delay_off = delay_off;
 
index 41dc76db43118a347e4a8a06923b0fd076a12dc3..a019fbb70880bd402aea095b8b90a8ca26a0e66a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/reboot.h>
 #include "leds.h"
 
+static int panic_heartbeats;
+
 struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
        unsigned long brightness = LED_OFF;
        unsigned long delay = 0;
 
+       if (unlikely(panic_heartbeats)) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       panic_heartbeats = 1;
+       return NOTIFY_DONE;
+}
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
 
 static struct notifier_block heartbeat_panic_nb = {
-       .notifier_call = heartbeat_reboot_notifier,
+       .notifier_call = heartbeat_panic_notifier,
 };
 
 static int __init heartbeat_trig_init(void)
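
The heartbeat trigger previously reused heartbeat_reboot_notifier() for the panic chain as well; it now gets a dedicated heartbeat_panic_notifier() that only sets panic_heartbeats, which led_heartbeat_function() checks so the LED is switched off and the timer is not re-armed after a panic. The registration of the two notifier blocks is outside this hunk; presumably heartbeat_trig_init() wires them up roughly as in this sketch (trigger registration and error handling omitted):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int __init heartbeat_notifiers_init(void)
    {
            /* Orderly shutdown: heartbeat_reboot_notifier() turns the LED off. */
            register_reboot_notifier(&heartbeat_reboot_nb);

            /* Panic: panic_notifier_list is an atomic notifier chain. */
            atomic_notifier_chain_register(&panic_notifier_list,
                                           &heartbeat_panic_nb);
            return 0;
    }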
index 754f38f8a6922d94d425fcaef5d834a573851fd4..638dae048b4fada0633f2592538ee16535072b87 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 
@@ -61,11 +62,11 @@ struct multipath {
        struct list_head list;
        struct dm_target *ti;
 
-       spinlock_t lock;
-
        const char *hw_handler_name;
        char *hw_handler_params;
 
+       spinlock_t lock;
+
        unsigned nr_priority_groups;
        struct list_head priority_groups;
 
@@ -81,16 +82,17 @@ struct multipath {
        struct priority_group *next_pg; /* Switch to this PG if set */
        unsigned repeat_count;          /* I/Os left before calling PS again */
 
-       unsigned queue_io;              /* Must we queue all I/O? */
-       unsigned queue_if_no_path;      /* Queue I/O if last path fails? */
-       unsigned saved_queue_if_no_path;/* Saved state during suspension */
+       unsigned queue_io:1;            /* Must we queue all I/O? */
+       unsigned queue_if_no_path:1;    /* Queue I/O if last path fails? */
+       unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
        unsigned pg_init_retries;       /* Number of times to retry pg_init */
        unsigned pg_init_count;         /* Number of times pg_init called */
        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
 
+       unsigned queue_size;
        struct work_struct process_queued_ios;
        struct list_head queued_ios;
-       unsigned queue_size;
 
        struct work_struct trigger_event;
 
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
-        * Second time we only try the ones we skipped.
+        * Second time we only try the ones we skipped, but set
+        * pg_init_delay_retry so we do not hammer controllers.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
-                       if (!__choose_path_in_pg(m, pg, nr_bytes))
+                       if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+                               if (!bypassed)
+                                       m->pg_init_delay_retry = 1;
                                return;
+                       }
                }
        } while (bypassed--);
 
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       if (!m->queue_size)
-               goto out;
-
        if (!m->current_pgpath)
                __choose_pgpath(m, 0);
 
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
        if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
                __pg_init_all_paths(m);
 
-out:
        spin_unlock_irqrestore(&m->lock, flags);
        if (!must_queue)
                dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
 {
-       struct multipath *m = (struct multipath *) ti->private;
-       struct block_device *bdev = NULL;
-       fmode_t mode = 0;
+       struct multipath *m = ti->private;
+       struct block_device *bdev;
+       fmode_t mode;
        unsigned long flags;
-       int r = 0;
+       int r;
+
+again:
+       bdev = NULL;
+       mode = 0;
+       r = 0;
 
        spin_lock_irqsave(&m->lock, flags);
 
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
        if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
                r = scsi_verify_blk_ioctl(NULL, cmd);
 
+       if (r == -EAGAIN && !fatal_signal_pending(current)) {
+               queue_work(kmultipathd, &m->process_queued_ios);
+               msleep(10);
+               goto again;
+       }
+
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
 
@@ -1643,7 +1656,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
        .name = "multipath",
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
index d039de8322f0a314fca492df4b932289d9033e0d..b58b7a33914abd721e3508835a61afd699221dbf 100644 (file)
@@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->split_io = dm_rh_get_region_size(ms->rh);
        ti->num_flush_requests = 1;
        ti->num_discard_requests = 1;
+       ti->discard_zeroes_data_unsupported = 1;
 
        ms->kmirrord_wq = alloc_workqueue("kmirrord",
                                          WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & REQ_FLUSH))
+               if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
                        dm_rh_dec(ms->rh, map_context->ll);
                return error;
        }
index 7771ed2121820ac50c026f45e2f6c0a288ffba85..69732e03eb3490d636a0183bd22303742ab65c65 100644 (file)
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
                return;
        }
 
+       if (bio->bi_rw & REQ_DISCARD)
+               return;
+
        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);
 
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & REQ_FLUSH)
+               if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
index 737d38865b693fb0288306477587f4e0d52f6cfa..3e2907f0bc462e261c05fab97cb0bff272649868 100644 (file)
@@ -1082,31 +1082,155 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
        return 0;
 }
 
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                   dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r, inc;
+       struct thin_disk_superblock *disk_super;
+       struct dm_block *copy, *sblock;
+       dm_block_t held_root;
+
+       /*
+        * Copy the superblock.
+        */
+       dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+       r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+                              &sb_validator, &copy, &inc);
+       if (r)
+               return r;
+
+       BUG_ON(!inc);
+
+       held_root = dm_block_location(copy);
+       disk_super = dm_block_data(copy);
+
+       if (le64_to_cpu(disk_super->held_root)) {
+               DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+               dm_tm_dec(pmd->tm, held_root);
+               dm_tm_unlock(pmd->tm, copy);
+               pmd->need_commit = 1;
+
+               return -EBUSY;
+       }
+
+       /*
+        * Wipe the spacemap since we're not publishing this.
+        */
+       memset(&disk_super->data_space_map_root, 0,
+              sizeof(disk_super->data_space_map_root));
+       memset(&disk_super->metadata_space_map_root, 0,
+              sizeof(disk_super->metadata_space_map_root));
+
+       /*
+        * Increment the data structures that need to be preserved.
+        */
+       dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+       dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+       dm_tm_unlock(pmd->tm, copy);
+
+       /*
+        * Write the held root into the superblock.
+        */
+       r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+                            &sb_validator, &sblock);
+       if (r) {
+               dm_tm_dec(pmd->tm, held_root);
+               pmd->need_commit = 1;
+               return r;
+       }
+
+       disk_super = dm_block_data(sblock);
+       disk_super->held_root = cpu_to_le64(held_root);
+       dm_bm_unlock(sblock);
+
+       pmd->need_commit = 1;
+
+       return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = __reserve_metadata_snap(pmd);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
 {
        int r;
        struct thin_disk_superblock *disk_super;
-       struct dm_block *sblock;
+       struct dm_block *sblock, *copy;
+       dm_block_t held_root;
 
        r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
                             &sb_validator, &sblock);
        if (r)
                return r;
 
+       disk_super = dm_block_data(sblock);
+       held_root = le64_to_cpu(disk_super->held_root);
+       disk_super->held_root = cpu_to_le64(0);
+       pmd->need_commit = 1;
+
+       dm_bm_unlock(sblock);
+
+       if (!held_root) {
+               DMWARN("No pool metadata snapshot found: nothing to release.");
+               return -EINVAL;
+       }
+
+       r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(copy);
+       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+       return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = __release_metadata_snap(pmd);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+                              dm_block_t *result)
+{
+       int r;
+       struct thin_disk_superblock *disk_super;
+       struct dm_block *sblock;
+
+       r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+                           &sb_validator, &sblock);
+       if (r)
+               return r;
+
        disk_super = dm_block_data(sblock);
        *result = le64_to_cpu(disk_super->held_root);
 
        return dm_bm_unlock(sblock);
 }
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                  dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+                             dm_block_t *result)
 {
        int r;
 
        down_read(&pmd->root_lock);
-       r = __get_held_metadata_root(pmd, result);
+       r = __get_metadata_snap(pmd, result);
        up_read(&pmd->root_lock);
 
        return r;
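
Together, the reserve/release pair implements the pool's "metadata snapshot": __reserve_metadata_snap() shadows the superblock, wipes the space-map roots in the copy, takes references on the data-mapping and device-details trees, and stores the copy's block number in held_root; __release_metadata_snap() drops those references and clears held_root again. A sketch of the in-kernel call sequence, using only functions declared by this patch (error paths trimmed; pmd is the pool's struct dm_pool_metadata):

    static int metadata_snap_roundtrip(struct dm_pool_metadata *pmd)
    {
            dm_block_t held_root;
            int r;

            r = dm_pool_commit_metadata(pmd);       /* snapshot a committed view */
            if (r)
                    return r;

            r = dm_pool_reserve_metadata_snap(pmd); /* -EBUSY if one already exists */
            if (r)
                    return r;

            r = dm_pool_get_metadata_snap(pmd, &held_root);
            if (r)
                    return r;

            /* held_root is what pool status reports to userspace for
             * read-only inspection of the thin-pool metadata. */

            return dm_pool_release_metadata_snap(pmd);
    }

From userspace the same sequence is driven through the new pool target messages added later in this patch, reserve_metadata_snap and release_metadata_snap, with the held root reported via the pool status line.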
index ed4725e67c96fbae52f1ce48c28d79f0a02075a4..b88918ccdaf688e3bc5f10478023276893cc3521 100644 (file)
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
 
 /*
  * Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps).  Userland can access the data structures for READ
+ * operations only.  A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes.  Release this as soon as you finish with it.
  */
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                  dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+                             dm_block_t *result);
 
 /*
  * Actions on a single virtual device.
index eb3d138ff55afc629e0d0167acae140223a86553..68694da0d21d0566a61649339cb3a4f3a3bbc943 100644 (file)
@@ -111,7 +111,7 @@ struct cell_key {
        dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
        struct hlist_node list;
        struct bio_prison *prison;
        struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
        return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
                return NULL;
 
        spin_lock_init(&prison->lock);
-       prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-                                                       sizeof(struct cell));
+       prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
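
prison_create() (and, further down, pool_create() for its mapping and endio-hook pools) moves from mempool_create_kmalloc_pool() to a mempool backed by a dedicated kmem_cache, so the objects come from a properly sized, named slab instead of rounded-up kmalloc allocations. Creating _cell_cache itself happens at module init, outside these hunks; a sketch of what that step presumably looks like, using the KMEM_CACHE() helper which derives the cache name and size from the struct:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static int __init dm_bio_prison_cache_init(void)
    {
            /* Backing slab for the bio prison cells drawn from by the mempool. */
            _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
            if (!_cell_cache)
                    return -ENOMEM;
            return 0;
    }

    static void dm_bio_prison_cache_exit(void)
    {
            kmem_cache_destroy(_cell_cache);
    }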
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
                       (lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-                                   struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+                                                 struct cell_key *key)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct hlist_node *tmp;
 
        hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-                     struct bio *inmate, struct cell **ref)
+                     struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
        int r = 1;
        unsigned long flags;
        uint32_t hash = hash_key(prison, key);
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
 
        BUG_ON(hash > prison->nr_buckets);
 
@@ -273,7 +274,7 @@ out:
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell.  This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        BUG_ON(cell->holder != bio);
        BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
        __cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                    struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                  struct bio_list *inmates)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
        spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
        struct bio_prison *prison = cell->prison;
        struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
        unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
        struct deferred_set shared_read_ds;
        struct deferred_set all_io_ds;
 
-       struct new_mapping *next_mapping;
+       struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;
        mempool_t *endio_hook_pool;
 };
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
        struct thin_c *tc;
        struct deferred_entry *shared_read_entry;
        struct deferred_entry *all_io_entry;
-       struct new_mapping *overwrite_mapping;
+       struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
        bio_list_init(master);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
        struct list_head list;
 
        unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        int err;
 
        /*
@@ -759,7 +763,7 @@ struct new_mapping {
        bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
        struct pool *pool = m->tc->pool;
 
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        unsigned long flags;
-       struct new_mapping *m = context;
+       struct dm_thin_new_mapping *m = context;
        struct pool *pool = m->tc->pool;
 
        m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
        unsigned long flags;
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-       struct new_mapping *m = h->overwrite_mapping;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;
 
        m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
                       dm_block_t data_block)
 {
        struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
        struct bio_list bios;
        struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
        wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
        struct thin_c *tc = m->tc;
        struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
        mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
        int r;
        struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-                            void (*fn)(struct new_mapping *))
+                            void (*fn)(struct dm_thin_new_mapping *))
 {
        unsigned long flags;
        struct list_head maps;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
 
        INIT_LIST_HEAD(&maps);
        spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
        return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-       struct new_mapping *r = pool->next_mapping;
+       struct dm_thin_new_mapping *r = pool->next_mapping;
 
        BUG_ON(!pool->next_mapping);
 
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                          struct dm_dev *origin, dm_block_t data_origin,
                          dm_block_t data_dest,
-                         struct cell *cell, struct bio *bio)
+                         struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        int r;
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_origin, dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->origin_dev,
                      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-                         dm_block_t data_block, struct cell *cell,
+                         dm_block_t data_block, struct dm_bio_prison_cell *cell,
                          struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                process_prepared_mapping(m);
 
        else if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                remap_and_issue(tc, bio, data_block);
-
        } else {
                int r;
                struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
        struct bio *bio;
        struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
        int r;
        unsigned long flags;
        struct pool *pool = tc->pool;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        struct cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
-       struct new_mapping *m;
+       struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1240,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
                        cell_release_singleton(cell, bio);
                        cell_release_singleton(cell2, bio);
-                       remap_and_issue(tc, bio, lookup_result.block);
+                       if ((!lookup_result.shared) && pool->pf.discard_passdown)
+                               remap_and_issue(tc, bio, lookup_result.block);
+                       else
+                               bio_endio(bio, 0);
                }
                break;
 
@@ -1263,7 +1271,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                          struct cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
-                         struct cell *cell)
+                         struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1290,7 +1298,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
                               dm_block_t block,
                               struct dm_thin_lookup_result *lookup_result)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
        struct cell_key key;
 
@@ -1305,7 +1313,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_data_dir(bio) == WRITE)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
                h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1315,7 +1323,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-                           struct cell *cell)
+                           struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1363,7 +1371,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 {
        int r;
        dm_block_t block = get_bio_block(tc, bio);
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
@@ -1432,7 +1440,7 @@ static void process_deferred_bios(struct pool *pool)
        spin_unlock_irqrestore(&pool->lock, flags);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
                struct thin_c *tc = h->tc;
 
                /*
@@ -1522,10 +1530,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
        wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+       struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
        h->tc = tc;
        h->shared_read_entry = NULL;
@@ -1687,6 +1695,9 @@ static void __pool_destroy(struct pool *pool)
        kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
                                unsigned long block_size, char **error)
@@ -1755,16 +1766,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
        ds_init(&pool->all_io_ds);
 
        pool->next_mapping = NULL;
-       pool->mapping_pool =
-               mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+       pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+                                                     _new_mapping_cache);
        if (!pool->mapping_pool) {
                *error = "Error creating pool's mapping mempool";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_mapping_pool;
        }
 
-       pool->endio_hook_pool =
-               mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+       pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+                                                        _endio_hook_cache);
        if (!pool->endio_hook_pool) {
                *error = "Error creating pool's endio_hook mempool";
                err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2287,43 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
        return 0;
 }
 
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
+       r = dm_pool_reserve_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("reserve_metadata_snap message failed.");
+
+       return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_release_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("release_metadata_snap message failed.");
+
+       return r;
+}
+
 /*
  * Messages supported:
  *   create_thin       <dev_id>
@@ -2283,6 +2331,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
  *   delete            <dev_id>
  *   trim              <dev_id> <new_size_in_sectors>
  *   set_transaction_id <current_trans_id> <new_trans_id>
+ *   reserve_metadata_snap
+ *   release_metadata_snap
  */
 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2302,6 +2352,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        else if (!strcasecmp(argv[0], "set_transaction_id"))
                r = process_set_transaction_id_mesg(argc, argv, pool);
 
+       else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+               r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+       else if (!strcasecmp(argv[0], "release_metadata_snap"))
+               r = process_release_metadata_snap_mesg(argc, argv, pool);
+
        else
                DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
 
@@ -2361,7 +2417,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                if (r)
                        return r;
 
-               r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+               r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
                if (r)
                        return r;
 
@@ -2457,7 +2513,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 1, 0},
+       .version = {1, 2, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2575,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = 1;
                ti->num_discard_requests = 1;
+               ti->discard_zeroes_data_unsupported = 1;
        }
 
        dm_put(pool_md);
@@ -2613,9 +2670,9 @@ static int thin_endio(struct dm_target *ti,
                      union map_info *map_context)
 {
        unsigned long flags;
-       struct endio_hook *h = map_context->ptr;
+       struct dm_thin_endio_hook *h = map_context->ptr;
        struct list_head work;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;
 
        if (h->shared_read_entry) {
@@ -2755,7 +2812,32 @@ static int __init dm_thin_init(void)
 
        r = dm_register_target(&pool_target);
        if (r)
-               dm_unregister_target(&thin_target);
+               goto bad_pool_target;
+
+       r = -ENOMEM;
+
+       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+       if (!_cell_cache)
+               goto bad_cell_cache;
+
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               goto bad_new_mapping_cache;
+
+       _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+       if (!_endio_hook_cache)
+               goto bad_endio_hook_cache;
+
+       return 0;
+
+bad_endio_hook_cache:
+       kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+       dm_unregister_target(&pool_target);
+bad_pool_target:
+       dm_unregister_target(&thin_target);
 
        return r;
 }
@@ -2764,6 +2846,10 @@ static void dm_thin_exit(void)
 {
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);
+
+       kmem_cache_destroy(_cell_cache);
+       kmem_cache_destroy(_new_mapping_cache);
+       kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
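
For context, and not part of the commit itself: the two pool messages added above are driven from userspace, typically as "dmsetup message <pool-dev> 0 reserve_metadata_snap" (and "release_metadata_snap" to drop the held metadata root again). A minimal sketch of the same operation through libdevmapper follows; the device name "pool" and the helper name are assumptions made for illustration only.

#include <stdio.h>
#include <libdevmapper.h>

/* Illustrative only: send one of the new thin-pool messages.
 * "pool" as the mapped device name is an assumption for the example;
 * libdevmapper's usual 1-on-success return convention is assumed. */
static int send_pool_message(const char *dev_name, const char *msg)
{
	struct dm_task *dmt;
	int r = 0;

	dmt = dm_task_create(DM_DEVICE_TARGET_MSG);
	if (!dmt)
		return 0;

	if (dm_task_set_name(dmt, dev_name) &&
	    dm_task_set_sector(dmt, 0) &&
	    dm_task_set_message(dmt, msg))
		r = dm_task_run(dmt);	/* issues the target message ioctl */

	dm_task_destroy(dmt);
	return r;
}

/* e.g. send_pool_message("pool", "reserve_metadata_snap"); */
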
index 1c2f9048e1ae010864c4d2478c05f9a510c729e1..db02d2efb76f943185ec0d4101c4913fe0b3a41e 100644 (file)
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on.  This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
-       struct blk_plug_cb cb;
-       struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
-       struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-       if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
-               md_wakeup_thread(mdcb->mddev->thread);
-       kfree(mdcb);
-}
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
-       struct blk_plug *plug = current->plug;
-       struct md_plug_cb *mdcb;
-
-       if (!plug)
-               return 0;
-
-       list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
-               if (mdcb->cb.callback == plugger_unplug &&
-                   mdcb->mddev == mddev) {
-                       /* Already on the list, move to top */
-                       if (mdcb != list_first_entry(&plug->cb_list,
-                                                   struct md_plug_cb,
-                                                   cb.list))
-                               list_move(&mdcb->cb.list, &plug->cb_list);
-                       return 1;
-               }
-       }
-       /* Not currently on the callback list */
-       mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
-       if (!mdcb)
-               return 0;
-
-       mdcb->mddev = mddev;
-       mdcb->cb.callback = plugger_unplug;
-       atomic_inc(&mddev->plug_cnt);
-       list_add(&mdcb->cb.list, &plug->cb_list);
-       return 1;
+       struct mddev *mddev = cb->data;
+       md_wakeup_thread(mddev->thread);
+       kfree(cb);
 }
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
 
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev)
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
-       atomic_set(&mddev->plug_cnt, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
@@ -2931,6 +2882,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
                 * can be sane */
                return -EBUSY;
        rdev->data_offset = offset;
+       rdev->new_data_offset = offset;
        return len;
 }
 
@@ -3926,8 +3878,8 @@ array_state_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3943,14 +3895,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                /* stopping an active array */
                if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0, 0);
+               err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
                        if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2, 0);
+                       err = do_md_stop(mddev, 2, NULL);
                } else
                        err = 0; /* already inactive */
                break;
@@ -3958,7 +3910,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = md_set_readonly(mddev, 0);
+                       err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
@@ -3968,7 +3920,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
-                               err = md_set_readonly(mddev, 0);
+                               err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
@@ -5351,15 +5303,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
        int err = 0;
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open) {
+       if (atomic_read(&mddev->openers) > !!bdev) {
                printk("md: %s still in use.\n",mdname(mddev));
                err = -EBUSY;
                goto out;
        }
+       if (bdev)
+               sync_blockdev(bdev);
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
@@ -5381,18 +5335,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                     struct block_device *bdev)
 {
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
 
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open ||
+       if (atomic_read(&mddev->openers) > !!bdev ||
            mddev->sysfs_active) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                return -EBUSY;
        }
+       if (bdev)
+               /* It is possible IO was issued on some other
+                * open file which was closed before we took ->open_mutex.
+                * As that was not the last close __blkdev_put will not
+                * have called sync_blockdev, so we must.
+                */
+               sync_blockdev(bdev);
 
        if (mddev->pers) {
                if (mddev->ro)
@@ -5466,7 +5428,7 @@ static void autorun_array(struct mddev *mddev)
        err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop(mddev, 0, 0);
+               do_md_stop(mddev, 0, NULL);
        }
 }
 
@@ -5784,8 +5746,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6482,11 +6443,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop(mddev, 0, 1);
+                       err = do_md_stop(mddev, 0, bdev);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = md_set_readonly(mddev, 1);
+                       err = md_set_readonly(mddev, bdev);
                        goto done_unlock;
 
                case BLKROSET:
@@ -6751,7 +6712,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7259,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just incase thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7409,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7515,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
index 7b4a3c318cae437ecb2041c33b2bdcee2c66bf15..f385b038589d32313014e07af203b8f742bec4a7 100644 (file)
@@ -266,9 +266,6 @@ struct mddev {
        int                             new_chunk_sectors;
        int                             reshape_backwards;
 
-       atomic_t                        plug_cnt;       /* If device is expecting
-                                                        * more bios soon.
-                                                        */
        struct md_thread                *thread;        /* management thread */
        struct md_thread                *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* last block scheduled */
@@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                                   struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   struct mddev *mddev);
-extern int mddev_check_plugged(struct mddev *mddev);
 extern void md_trim_bio(struct bio *bio, int offset, int size);
+
+extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+static inline int mddev_check_plugged(struct mddev *mddev)
+{
+       return !!blk_check_plugged(md_unplug, mddev,
+                                  sizeof(struct blk_plug_cb));
+}
 #endif /* _MD_MD_H */
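
For reference, and not part of this hunk: the new inline relies on the blk_check_plugged() helper added to the block layer in the same series. Its shape, inferred from the md_unplug() callback and the call above, is roughly the following sketch; treat the exact field layout as assumed rather than authoritative.

/* Sketch of the block-layer interface md now uses, reconstructed from
 * how md_unplug() and mddev_check_plugged() call it above. */
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);

struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;	/* invoked when the plug is flushed */
	void *data;			/* md stores the mddev here */
};

/* Returns the (possibly newly allocated) callback entry if the current
 * task has an active blk_plug, or NULL if it does not -- hence the !!
 * in mddev_check_plugged(). */
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size);
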
index 9339e67fcc79a9f961d016d3f80291ce012323bb..61a1833ebaf33ec40afaf752db9a849439c9bd13 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53bf4aa2b1efe1136c2520067ece16bb9b35..fc90c11620adb026c9842d6e95497248d02556e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
index fc469ba9f6277a7c701615a1b3d788a05fa52559..3d0ed53328831627ab6ec79cb85946f36b0fe7a4 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 6f8d38747d7f438294fca80e24a3e3a61441dda1..e5604b32d91ff0017a5a625cfd8106fca92bfe19 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -249,6 +252,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
 
        return r;
 }
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
 
 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
                    struct dm_block_validator *v,
@@ -259,6 +263,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 
        return dm_bm_read_lock(tm->bm, b, v, blk);
 }
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
 
 int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
@@ -342,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -362,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index 835de7168cd3ae7d28409e46ebcc281e4b8fe917..36a8fc059ac36efc068603c51348132324d5d399 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets it's own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 
        if (atomic_dec_and_test(&r1_bio->remaining)) {
                /* if we're here, all write(s) have completed, so clean up */
-               md_done_sync(mddev, r1_bio->sectors, 1);
-               put_buf(r1_bio);
+               int s = r1_bio->sectors;
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                   test_bit(R1BIO_WriteError, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else {
+                       put_buf(r1_bio);
+                       md_done_sync(mddev, s, 1);
+               }
        }
 }
 
@@ -2170,8 +2173,7 @@ static void raid1d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               if (atomic_read(&mddev->plug_cnt) == 0)
-                       flush_pending_writes(conf);
+               flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2488,9 +2490,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
-               for (i = 0; i < conf->raid_disks * 2; i++) {
+               for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
+                               read_targets--;
                                md_sync_acct(bio->bi_bdev, nr_sectors);
                                generic_make_request(bio);
                        }
@@ -2550,6 +2553,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
        err = -EINVAL;
        spin_lock_init(&conf->device_lock);
        rdev_for_each(rdev, mddev) {
+               struct request_queue *q;
                int disk_idx = rdev->raid_disk;
                if (disk_idx >= mddev->raid_disks
                    || disk_idx < 0)
@@ -2562,6 +2566,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                if (disk->rdev)
                        goto abort;
                disk->rdev = rdev;
+               q = bdev_get_queue(rdev->bdev);
+               if (q->merge_bvec_fn)
+                       mddev->merge_check_needed = 1;
 
                disk->head_position = 0;
        }
@@ -2617,7 +2624,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 987db37cb875ead2041fede756062835158ecf0c..5d33603a497d6b7a0ee53c54d51501b99c4ee2ac 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recored in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2890,6 +2889,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3426,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
@@ -3475,6 +3480,7 @@ static int run(struct mddev *mddev)
 
        rdev_for_each(rdev, mddev) {
                long long diff;
+               struct request_queue *q;
 
                disk_idx = rdev->raid_disk;
                if (disk_idx < 0)
@@ -3493,6 +3499,9 @@ static int run(struct mddev *mddev)
                                goto out_free_conf;
                        disk->rdev = rdev;
                }
+               q = bdev_get_queue(rdev->bdev);
+               if (q->merge_bvec_fn)
+                       mddev->merge_check_needed = 1;
                diff = (rdev->new_data_offset - rdev->data_offset);
                if (!mddev->reshape_backwards)
                        diff = -diff;
index d26767246d26ad1d2bb9a2da371ab1bbbdf130fc..bde9da2baa392d76a1ea885a136790487ccf962f 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               } else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
@@ -4521,7 +4543,7 @@ static void raid5d(struct mddev *mddev)
        while (1) {
                struct bio *bio;
 
-               if (atomic_read(&mddev->plug_cnt) == 0 &&
+               if (
                    !list_empty(&conf->bitmap_list)) {
                        /* Now is a good time to flush some bitmap updates */
                        conf->seq_flush++;
@@ -4531,8 +4553,7 @@ static void raid5d(struct mddev *mddev)
                        conf->seq_write = conf->seq_flush;
                        activate_bit_delay(conf);
                }
-               if (atomic_read(&mddev->plug_cnt) == 0)
-                       raid5_activate_delayed(conf);
+               raid5_activate_delayed(conf);
 
                while ((bio = remove_bio_from_retry(conf))) {
                        int ok;
@@ -4823,6 +4844,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4968,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5488,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5499,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5515,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
index 7d42c11c868434020c8aaddc2c6b9a0fee5d96aa..0cdbd742974ae0404ccf724d734ea5da08a6f821 100644 (file)
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
        struct saa7146_dev *dev = video_drvdata(file);
        struct saa7146_fh *fh = NULL;
        int result = 0;
-       enum v4l2_buf_type type;
 
        DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
 
@@ -207,10 +206,6 @@ static int fops_open(struct file *file)
 
        DEB_D("using: %p\n", dev);
 
-       type = vdev->vfl_type == VFL_TYPE_GRABBER
-            ? V4L2_BUF_TYPE_VIDEO_CAPTURE
-            : V4L2_BUF_TYPE_VBI_CAPTURE;
-
        /* check if an extension is registered */
        if( NULL == dev->ext ) {
                DEB_S("no extension registered for this device\n");
index 00a67326c1931dd01496a1fdb8544ae0ceb77206..39eab73b01ae9bb5e0a9c4d9667e3c99b4b51415 100644 (file)
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        if (minor == MAX_DVB_MINORS) {
                kfree(dvbdevfops);
                kfree(dvbdev);
+               up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
                return -EINVAL;
        }
index 98ecaf0900d683c1560d92f9f48fcc12e98f7a84..3180f5b2a6a60d8bf3930876e788b3217efa8814 100644 (file)
@@ -516,9 +516,9 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
        if(cx24110_readreg(state,0x10)&0x40) {
                /* the RS error counter has finished one counting window */
                cx24110_writereg(state,0x10,0x60); /* select the byer reg */
-               cx24110_readreg(state, 0x12) |
+               (void)(cx24110_readreg(state, 0x12) |
                        (cx24110_readreg(state, 0x13) << 8) |
-                       (cx24110_readreg(state, 0x14) << 16);
+                       (cx24110_readreg(state, 0x14) << 16));
                cx24110_writereg(state,0x10,0x70); /* select the bler reg */
                state->lastbler=cx24110_readreg(state,0x12)|
                        (cx24110_readreg(state,0x13)<<8)|
index 945404991529d1517a046281d807b23385aed6c3..ed3b0ba624dec672aaf350790731aa597027346b 100644 (file)
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       switch ((buf[0] >> 0) & 0x03) {
+       switch ((buf[0] >> 0) & 0x07) {
        case 0:
                c->modulation = QAM_16;
                break;
index a3ab1a5b6597fd71933639489d9769c91e42ef8a..cc11260e99df96f7ce19837e6ba268811aa825cf 100644 (file)
@@ -126,7 +126,7 @@ static int lg216x_write_regs(struct lg216x_state *state,
 
        lg_reg("writing %d registers...\n", len);
 
-       for (i = 0; i < len - 1; i++) {
+       for (i = 0; i < len; i++) {
                ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
                if (lg_fail(ret))
                        return ret;
index 63c004a25e0b811badb95b5b4d5029bcbc031548..664e460f247b5a7d1d53629959da7705fb259aa1 100644 (file)
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { USB_DEVICE(0x2040, 0xc0a0),
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+       { USB_DEVICE(0x2040, 0xf5a0),
+               .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { } /* Terminating entry */
        };
 
index 740a3d5520c7f8301bd5b637005d1e98343c59c6..b415211d0c4bee08a38093f3ddb11c9e9918a79e 100644 (file)
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
                goto err_out_free_region;
 
        dev->io = pci_resource_start(pdev, 0);
-       if (snd_tea575x_init(&dev->tea)) {
+       if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
                goto err_out_free_region;
        }
index 52b8011f1b2314f5b9b12c76aef1b1ae9c1f6cf5..4efcbec74c52dd0f63532891f4f7ff8e7176e83e 100644 (file)
@@ -238,7 +238,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
        snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
                        fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
 
-       if (snd_tea575x_init(&fmr2->tea)) {
+       if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
                release_region(fmr2->io, 2);
                return -ENODEV;
index e9f63876129623fb0e9e887bf9d295867d49272e..f412f7ab270b63e0f39bc47aefdf58b3d3e5f82c 100644 (file)
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
        /* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
        { USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+       /* Axentia ALERT FM USB Receiver */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
        /* Terminating entry */
        { }
 };
index 342c2c8c1ddfcff71f493f3f6f08a4f1969df440..54ee34872d143cea7345f60a5ad9038e91266a51 100644 (file)
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
 
 static bool txandrx; /* default = 0 */
 module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
 
 static unsigned int wake_sc = 0x800F040C;
 module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
        data->dev->tx_ir = wbcir_tx;
        data->dev->priv = data;
        data->dev->dev.parent = &device->dev;
+       data->dev->timeout = MS_TO_NS(100);
+       data->dev->allowed_protos = RC_TYPE_ALL;
 
        if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
                dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
index ff2933ab705ff2608dd38827dcab12ff0b0ca2ee..856ab962cd63516b79865d47414a2b7b59fa0926 100644 (file)
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 1, 0, 1 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x0f,
                .gpiomux        = { 0x0c, 0x04, 0x08, 0x04 },
                /*                0x04 for some cards ?? */
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0xc00, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 2, 3 },
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .msp34xx_alt    = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 1, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
                /* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1, 0),
                .gpiomux        = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
                .gpiomute       = 0xcfa007,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .volume_gpio    = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 0, 0, 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0x00000 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .muxsel_hook    = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
                Note: There exists another variant "Winfast 2000" with tv stereo !?
                Note: eeprom only contains FF and pci subsystem id 107d:6606
                */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .has_radio      = 1,
                .tuner_type     = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0x551400, 0x551200, 0, 0 },
                .gpiomute       = 0x551c00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0xd0001, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 4, 11, 7 },
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0},
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {2,0,0,0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
                                MUX2 (mask 0x30000):
                                        0,2,3= from MSP34xx
                                        1= FM stereo Radio from Tuner */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0x10, 8 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(3, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0x800,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_4036FY5_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2),
                .gpiomux        = { },
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 1, 0, 4, 4 },
                .gpiomute       = 9,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomute       = 0x1800,
                .audio_mode_gpio= fv2000s_audio,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0x500, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
                .gpiomute       = 13,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_LG_PAL_I_FM,
                .tuner_addr     = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x01, 0x00, 0x03, 0x03 },
                .gpiomute       = 0x09,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
                                                * btwincap uses 0x80000/0x80003
                                                */
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
                /* .audio_inputs= 1, */
                .svhs           = 2,
                .muxsel         = MUXSEL(2, 0, 1, 1),
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3},
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
                /*                  Tuner, Radio, external, internal, off,  on */
                .gpiomux        = { 0x08,  0x0f,  0x0a,     0x08 },
                .gpiomute       = 0x0f,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
                .svhs           = 2,
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = NO_SVHS,   /* card has no svhs */
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
                .gpiomask       = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 1, 1 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = 2,
-               .needs_tvaudio  = 0,
                .gpiomask       = 0x68,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
                .pll            = PLL_28,
-               .needs_tvaudio  = 0,
                .muxsel_hook    = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_addr     = ADDR_UNSET,
                .gpiomask       = 0x008007,
                .gpiomux        = { 0, 0x000001,0,0 },
-               .needs_tvaudio  = 1,
                .has_radio      = 1,
        },
        [BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_YMEC_TVF66T5_B_DFF,
                .tuner_addr     = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TENA_9533_DI,
                .tuner_addr     = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 }, /* CONTVFMi */
                .gpiomute       = 3, /* CONTVFMi */
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(0, 2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(3, 2, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                struct tuner_setup tun_setup;
 
                /* Load tuner module before issuing tuner config call! */
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
                                &btv->c.i2c_adap, "tuner",
                                0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                tun_setup.type = btv->tuner_type;
                tun_setup.addr = addr;
 
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        tun_setup.mode_mask |= T_RADIO;
 
                bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
                        bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
                btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
        }
+
+       /* The 61334 needs the msp3410 to do the radio demod to get sound */
+       if (tv.model == 61334)
+               btv->radio_uses_msp_demodulator = 1;
 }
 
 static int terratec_active_radio_upgrade(struct bttv *btv)
index a9cfb0f4be48183ee13a0b042013448ac73395e8..ff7a589d8e0f5e0486dd72c8870e4d965a8ce50e 100644 (file)
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
                   For now this is sufficient. */
                switch (input) {
                case TVAUDIO_INPUT_RADIO:
+                       /* Some boards need the msp to do the radio demod */
+                       if (btv->radio_uses_msp_demodulator) {
+                               in = MSP_INPUT_DEFAULT;
+                               break;
+                       }
                        in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
                                    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
                        break;
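
The hunks above wire a new per-device flag, radio_uses_msp_demodulator, from the Hauppauge eeprom probe (model 61334) into audio_mux(), so that radio sound is routed through the MSP3410 demodulator instead of SCART2. A minimal sketch of that routing decision, reusing the MSP_INPUT macros already referenced in the hunk (not a drop-in replacement for the driver code):

	/* Pick the MSP34xx input for radio: boards flagged as needing the MSP
	 * demodulator take the default tuner path, all others stay on SCART2. */
	static int radio_msp_input(const struct bttv *btv)
	{
		if (btv->radio_uses_msp_demodulator)
			return MSP_INPUT_DEFAULT;
		return MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
				 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
	}
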
index c5171619ac7998369511c515da9046beda33d315..acfe2f3b92d9abdbb050a6586a7d9f4f310409e6 100644 (file)
@@ -236,7 +236,6 @@ struct tvcard {
        /* i2c audio flags */
        unsigned int no_msp34xx:1;
        unsigned int no_tda7432:1;
-       unsigned int needs_tvaudio:1;
        unsigned int msp34xx_alt:1;
        /* Note: currently no card definition needs to mark the presence
           of a RDS saa6588 chip. If this is ever needed, then add a new
index db943a8d580db131a1bcd4daaece5b6bf0552093..70fd4f23f605aa8374799239ca71ca18219bf9d2 100644 (file)
@@ -440,6 +440,7 @@ struct bttv {
        /* radio data/state */
        int has_radio;
        int radio_user;
+       int radio_uses_msp_demodulator;
 
        /* miro/pinnacle + Aimslab VHX
           philips matchbox (tea5757 radio tuner) support */
index 2520219f01ba39fbc99ce3c17eaaa071e7097644..5b75a64b199bc924016a3d75e13a0c48f87a82e9 100644 (file)
@@ -607,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
                                }
                                o = i * pixels_per_line + pixels_read + k;
                                if (o < len) {
+                                       u8 ch = invert - buffer[k];
                                        got++;
-                                       put_user((invert - buffer[k]) << shift, buf + o);
+                                       put_user(ch << shift, buf + o);
                                }
                        }
                        pixels_read += bytes;
@@ -648,8 +649,8 @@ static int qcam_querycap(struct file *file, void  *priv,
        struct qcam *qcam = video_drvdata(file);
 
        strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
-       strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
-       strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+       strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+       strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
        vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
        vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
@@ -688,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
        pix->height = qcam->height / qcam->transfer_scale;
        pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
        pix->field = V4L2_FIELD_NONE;
-       pix->bytesperline = qcam->width;
-       pix->sizeimage = qcam->width * qcam->height;
+       pix->bytesperline = pix->width;
+       pix->sizeimage = pix->width * pix->height;
        /* Just a guess */
        pix->colorspace = V4L2_COLORSPACE_SRGB;
        return 0;
@@ -757,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
                  "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
                  { 0, 0, 0, 0 }
                },
-               { 0, 0, 0,
+               { 1, 0, 0,
                  "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
                  { 0, 0, 0, 0 }
                },
@@ -772,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
        return 0;
 }
 
+static int qcam_enum_framesizes(struct file *file, void *fh,
+                                        struct v4l2_frmsizeenum *fsize)
+{
+       static const struct v4l2_frmsize_discrete sizes[] = {
+               {  80,  60 },
+               { 160, 120 },
+               { 320, 240 },
+       };
+
+       if (fsize->index > 2)
+               return -EINVAL;
+       if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+           fsize->pixel_format != V4L2_PIX_FMT_Y6)
+               return -EINVAL;
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+       fsize->discrete = sizes[fsize->index];
+       return 0;
+}
+
 static ssize_t qcam_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
 {
@@ -795,6 +815,11 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
        return len;
 }
 
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+       return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
 static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct qcam *qcam =
@@ -828,7 +853,7 @@ static const struct v4l2_file_operations qcam_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
-       .poll           = v4l2_ctrl_poll,
+       .poll           = qcam_poll,
        .unlocked_ioctl = video_ioctl2,
        .read           = qcam_read,
 };
@@ -839,6 +864,7 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
        .vidioc_s_input                     = qcam_s_input,
        .vidioc_enum_input                  = qcam_enum_input,
        .vidioc_enum_fmt_vid_cap            = qcam_enum_fmt_vid_cap,
+       .vidioc_enum_framesizes             = qcam_enum_framesizes,
        .vidioc_g_fmt_vid_cap               = qcam_g_fmt_vid_cap,
        .vidioc_s_fmt_vid_cap               = qcam_s_fmt_vid_cap,
        .vidioc_try_fmt_vid_cap             = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
 
        v4l2_dev = &qcam->v4l2_dev;
-       strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+       snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
 
-       if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+       if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
                v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
                kfree(qcam);
                return NULL;
@@ -886,7 +912,7 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
        }
        qcam->pport = port;
-       qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+       qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
                        NULL, 0, NULL);
        if (qcam->pdev == NULL) {
                v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
                return -ENODEV;
        }
        qc_calibrate(qcam);
+       v4l2_ctrl_handler_setup(&qcam->hdl);
 
        parport_release(qcam->pdev);
 
index b55d57cc1a1c24972fad9e56f3eea804dccab535..7e5ffd6f51786d3390b629b8794b4139aa410551 100644 (file)
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
        }
 
        CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
                   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
-                  cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+                  cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
 
        return 0;
 }
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
        if (retval)
                goto err;
 
-       CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+       CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
 
        /* PCI Device Setup */
        retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
                goto free_workqueues;
 
        /* map io memory */
-       CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+       CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
        cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
                                       CX18_MEM_SIZE);
        if (!cx->enc_mem) {
index 7a37e0ee136f095aa8cfb8c696811aa2165efe46..2767c64df0c87f9c044aca755a63fbfd0c75adf3 100644 (file)
@@ -622,7 +622,7 @@ struct cx18 {
                                   unique ID. Starts at 1, so 0 can be used as
                                   uninitialized value in the stream->id. */
 
-       u32 base_addr;
+       resource_size_t base_addr;
 
        u8 card_rev;
        void __iomem *enc_mem, *reg_mem;
index 1b3fb502e6be5fdb31164407a72f8de96fa9712a..b85c292a849ac2debe36c828e506254cd65214a7 100644 (file)
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
 
        apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
        while (offset + sizeof(seghdr) < fw->size) {
-               /* TODO: byteswapping */
-               memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+               const u32 *shptr = src + offset / 4;
+
+               seghdr.sync1 = le32_to_cpu(shptr[0]);
+               seghdr.sync2 = le32_to_cpu(shptr[1]);
+               seghdr.addr = le32_to_cpu(shptr[2]);
+               seghdr.size = le32_to_cpu(shptr[3]);
+
                offset += sizeof(seghdr);
                if (seghdr.sync1 != APU_ROM_SYNC1 ||
                    seghdr.sync2 != APU_ROM_SYNC2) {
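
The hunk above stops memcpy()ing the APU segment header out of the little-endian firmware image and instead converts each field with le32_to_cpu(), so the loader also works on big-endian hosts. A standalone sketch of the equivalent byte-order-independent 32-bit load (hypothetical helper, not part of the driver):

	#include <stdint.h>

	/* Assemble a little-endian u32 from raw firmware bytes regardless of
	 * host endianness -- the same effect le32_to_cpu() has on the __le32
	 * words of the segment header. */
	static uint32_t fw_load_le32(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}
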
index ed8118390b02ab9fd3ec38bd53d549053c2d5616..eabf00c6351b82365494d27bbac6ffd7c8a83c29 100644 (file)
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
        u32 handle, mdl_ack_offset, mdl_ack_count;
        struct cx18_mailbox *mb;
+       int i;
 
        mb = &order->mb;
        handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
                return -1;
        }
 
-       cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
-                          sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+       for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+               ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+                       cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
 
        if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        struct cx18_mailbox *order_mb;
        struct cx18_in_work_order *order;
        int submit;
+       int i;
 
        switch (rpu) {
        case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        order_mb = &order->mb;
 
        /* mb->cmd and mb->args[0] through mb->args[2] */
-       cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+       for (i = 0; i < 4; i++)
+               (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
        /* mb->request and mb->ack.  N.B. we want to read mb->ack last */
-       cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
-                          2 * sizeof(u32));
+       for (i = 0; i < 2; i++)
+               (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
 
        if (order_mb->request == order_mb->ack) {
                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
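
Both cx18 hunks above drop cx18_memcpy_fromio() in favour of explicit cx18_readl() loops, which guarantees 32-bit-wide MMIO accesses in the order the hardware expects (note the existing comment about reading mb->ack last). A condensed sketch of the pattern, assuming copy_len is a multiple of sizeof(u32):

	/* Copy copy_len bytes of device memory one 32-bit register read at a
	 * time instead of relying on memcpy_fromio()'s access width. */
	static void cx18_copy_words(struct cx18 *cx, u32 *dst,
				    const void __iomem *src, size_t copy_len)
	{
		size_t i;

		for (i = 0; i < copy_len / sizeof(u32); i++)
			dst[i] = cx18_readl(cx, src + i * sizeof(u32));
	}
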
index 068f78dc5d13fa0268eef854b4956d18f7521bba..b4c99c7270cf8fc8d28d26226248208f8b930dad 100644 (file)
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvisocpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = URB_ISO_ASAP;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->interval = 1;
                urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvbulkpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->complete = cx231xx_audio_bulkirq;
                urb->transfer_buffer_length = sb_size;
index 3d15314e1f88d1ff71924a7a65362dd13b75fd12..ac7db52f404ffbc9c95207f7a2bd5b1414aa4187 100644 (file)
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                        return -ENOMEM;
                }
                dev->vbi_mode.bulk_ctl.urb[i] = urb;
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
 
                dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
                    kzalloc(sb_size, GFP_KERNEL);
index 13739e002a63fb6460bec16572bad10581fcb998..080e11157e5fe89afdb384c702376d80528daf5d 100644 (file)
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1250] = {
                .name           = "Hauppauge WinTV-HVR1250",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+               .tuner_type     = TUNER_PHILIPS_TDA8290,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .tuner_bus      = 1,
+#endif
+               .force_bff      = 1,
                .input          = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
                        .type   = CX23885_VMUX_TELEVISION,
-                       .vmux   = 0,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1,
+                       .amux   = CX25840_AUDIO8,
                        .gpio0  = 0xff00,
                }, {
-                       .type   = CX23885_VMUX_DEBUG,
-                       .vmux   = 0,
-                       .gpio0  = 0xff01,
-               }, {
+#endif
                        .type   = CX23885_VMUX_COMPOSITE1,
-                       .vmux   = 1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                }, {
                        .type   = CX23885_VMUX_SVIDEO,
-                       .vmux   = 2,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                } },
        },
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
                .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
+               .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_COMPOSITE1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
+       },
+       [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+               .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
                .name           = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2259,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
        struct cx23885_dev *dev = port->dev;
        u32 bitmask = 0;
 
-       if (command == XC2028_RESET_CLK)
+       if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
                return 0;
 
        if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
                /* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1400:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* FIXME: Implement me */
                break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
         */
        switch (dev->board) {
        case CX23885_BOARD_TEVII_S470:
-       case CX23885_BOARD_HAUPPAUGE_HVR1250:
                /* Currently only enabled for the integrated IR controller */
                if (!enable_885_ir)
                        break;
+       case CX23885_BOARD_HAUPPAUGE_HVR1250:
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
        case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
        case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
        case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_MYGICA_X8506:
index a80a92c474558a4c555a473c89aa1df568c88288..cd542684ba022c4f194928e37cfc318d6a1b0b79 100644 (file)
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
                }
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
                i2c_bus = &dev->i2c_bus[0];
                fe0->dvb.frontend = dvb_attach(s5h1411_attach,
                                               &hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
                                   0x60, &dev->i2c_bus[1].i2c_adap,
                                   &hauppauge_tda18271_config);
                }
+
+               tda18271_attach(&dev->ts1.analog_fe,
+                       0x60, &dev->i2c_bus[1].i2c_adap,
+                       &hauppauge_tda18271_config);
+
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
                i2c_bus = &dev->i2c_bus[0];
index c654bdc7ccb201dd4e285c0b7ddfe703ab89cb61..22f8e7fbd6656fe81f33f824832a38cc7e2dd14c 100644 (file)
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
 
        if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
                (dev->board == CX23885_BOARD_MPX885) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
                (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
                /* Configure audio routing */
                v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
 
        fe = vfe->dvb.frontend;
 
-       if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+       if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
                fe = &dev->ts1.analog_fe;
 
        if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
        int ret;
 
        switch (dev->board) {
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
                ret = cx23885_set_freq_via_ops(dev, f);
                break;
index d884784a1c8582f8a97c379dacc5ef603d2f934c..13c37ec07ae7e250b54694aad7c821c670fd584f 100644 (file)
@@ -90,6 +90,7 @@
 #define CX23885_BOARD_MYGICA_X8507             33
 #define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
 #define CX23885_BOARD_TEVII_S471               35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111  36
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index 83c1aa6b2e6c9a8762065e4c5994c62f033f3fba..f11f6f07e9154cd4d00790031d3da5e63c1ec8d1 100644 (file)
@@ -904,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
        list_add_tail(&dev->devlist, &cx25821_devlist);
        mutex_unlock(&cx25821_devlist_mutex);
 
-       strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
-       strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
        if (dev->pci->device != 0x8210) {
                pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
                        __func__, dev->pci->device);
index b9aa801b00a7b29c01c45334dd14d6c7c61bb978..029f2934a6d88bccdb409becdfd17849b99dff3b 100644 (file)
@@ -187,7 +187,7 @@ enum port {
 };
 
 struct cx25821_board {
-       char *name;
+       const char *name;
        enum port porta;
        enum port portb;
        enum port portc;
index fc1ff69cffd0d4917e86292573691be2436282fc..d8eac3e30a7ea99217e5b149de79752e73f594cf 100644 (file)
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
 
 
 /* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
 
 int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
 {
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
        finish_wait(&state->fw_wait, &wait);
        destroy_workqueue(q);
 
-       /* Call the cx23885 specific std setup func, we no longer rely on
+       /* Call the cx23888 specific std setup func, we no longer rely on
         * the generic cx24840 func.
         */
-       cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
+       else
+               cx25840_std_setup(client);
 
        /* (re)set input */
        set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
 
                        cx25840_write4(client, 0x410, 0xffff0dbf);
                        cx25840_write4(client, 0x414, 0x00137d03);
-                       cx25840_write4(client, 0x418, 0x01008080);
+
+                       /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is
+                          CHROMA_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x418, 0x01008080);
+                       else
+                               cx25840_write4(client, 0x418, 0x01000000);
+
                        cx25840_write4(client, 0x41c, 0x00000000);
-                       cx25840_write4(client, 0x420, 0x001c3e0f);
+
+                       /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is
+                          CRUSH_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x420, 0x001c3e0f);
+                       else
+                               cx25840_write4(client, 0x420, 0x001c8282);
+
                        cx25840_write4(client, 0x42c, 0x42600000);
                        cx25840_write4(client, 0x430, 0x0000039b);
                        cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
                cx25840_write4(client, 0x8d0, 0x1f063870);
        }
 
-       if (is_cx2388x(state)) {
+       if (is_cx23888(state)) {
                /* HVR1850 */
                /* AUD_IO_CTRL - I2S Input, Parallel1*/
                /*  - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
        }
        cx25840_and_or(client, 0x400, ~0xf, fmt);
        cx25840_and_or(client, 0x403, ~0x3, pal_m);
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        else
                cx25840_std_setup(client);
        if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
 static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct v4l2_subdev *sd = to_sd(ctrl);
+       struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
        switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
 
        case V4L2_CID_SATURATION:
-               cx25840_write(client, 0x420, ctrl->val << 1);
-               cx25840_write(client, 0x421, ctrl->val << 1);
+               if (is_cx23888(state)) {
+                       cx25840_write(client, 0x418, ctrl->val << 1);
+                       cx25840_write(client, 0x419, ctrl->val << 1);
+               } else {
+                       cx25840_write(client, 0x420, ctrl->val << 1);
+                       cx25840_write(client, 0x421, ctrl->val << 1);
+               }
                break;
 
        case V4L2_CID_HUE:
-               cx25840_write(client, 0x422, ctrl->val);
+               if (is_cx23888(state))
+                       cx25840_write(client, 0x41a, ctrl->val);
+               else
+                       cx25840_write(client, 0x422, ctrl->val);
                break;
 
        default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
        fmt->field = V4L2_FIELD_INTERLACED;
        fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-       Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
-       Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+       } else {
+               Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       }
 
-       Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
-       Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+       } else {
+               Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       }
 
        Vlines = fmt->height + (is_50Hz ? 4 : 7);
 
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
 
        return set_input(client, input, state->aud_input);
 }
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        return set_input(client, state->vid_input, input);
 }
 
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
        }
 }
 
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
 {
        struct cx25840_state *state = to_state(i2c_get_clientdata(client));
        v4l2_std_id std = state->std;
index e46446a449c090c91dc43e4eeb1e34e2baf2c16f..ed7b2aa1ed831d4582c68679476ce8b08b733da7 100644 (file)
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
        dprintk(1,"Loading firmware ...\n");
        dataptr = (u32*)firmware->data;
        for (i = 0; i < (firmware->size >> 2); i++) {
-               value = *dataptr;
+               value = le32_to_cpu(*dataptr);
                checksum += ~value;
                memory_write(dev->core, i, value);
                dataptr++;
index 20a7e24de6fba66e3ce806153a3dda5995c34fed..862c6575c55791fa7a6f4488d7cd933226fd02cb 100644 (file)
@@ -974,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
        [EM2884_BOARD_CINERGY_HTC_STICK] = {
                .name         = "Terratec Cinergy HTC Stick",
                .has_dvb      = 1,
+               .ir_codes     = RC_MAP_NEC_TERRATEC_CINERGY_XS,
 #if 0
                .tuner_type   = TUNER_PHILIPS_TDA8290,
                .tuner_addr   = 0x41,
@@ -2892,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
 
        if (dev->board.has_dvb)
                request_module("em28xx-dvb");
-       if (dev->board.has_ir_i2c && !disable_ir)
+       if (dev->board.ir_codes && !disable_ir)
                request_module("em28xx-rc");
 }
 
index fce5f7680c99603938a931ade5a54fc351e11299..5e30c4f3f248ac5427caf86201eaec09f83efc02 100644 (file)
@@ -527,6 +527,8 @@ static int em28xx_ir_init(struct em28xx *dev)
 
        if (dev->board.ir_codes == NULL) {
                /* No remote control support */
+               em28xx_warn("Remote control support is not available for "
+                               "this card.\n");
                return 0;
        }
 
index 137166d73945fdd94fee6a1dc4dc9115da65dd4e..31721eadc597de48e749f6202a98ddb819910cbf 100644 (file)
@@ -1653,7 +1653,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type buf_type)
 {
        struct gspca_dev *gspca_dev = video_drvdata(file);
-       int ret;
+       int i, ret;
 
        if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
@@ -1678,6 +1678,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
        wake_up_interruptible(&gspca_dev->wq);
 
        /* empty the transfer queues */
+       for (i = 0; i < gspca_dev->nframes; i++)
+               gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
        atomic_set(&gspca_dev->fr_q, 0);
        atomic_set(&gspca_dev->fr_i, 0);
        gspca_dev->fr_o = 0;
index b5acb1e4b4e7ce26dc0b644411872847ca8e5f77..80c81dd6d68b3ce4d6a40566449b89a110ce05a4 100644 (file)
@@ -96,7 +96,7 @@ static void setbrightness(struct gspca_dev *gspca_dev);
 static void setcontrast(struct gspca_dev *gspca_dev);
 static void setgain(struct gspca_dev *gspca_dev);
 static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
 static void setawb(struct gspca_dev *gspca_dev);
 static void setaec(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
                        .step    = 1,
                        .default_value = 1,
                },
-               .set = sd_setagc
+               .set_control = setagc
        },
 [AWB] = {
                {
@@ -851,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
@@ -1242,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
 
        cam->ctrls = sd->ctrls;
 
-       /* the auto white balance control works only when auto gain is set */
-       if (sd_ctrls[AGC].qctrl.default_value == 0)
-               gspca_dev->ctrl_inac |= (1 << AWB);
-
        cam->cam_mode = ov772x_mode;
        cam->nmodes = ARRAY_SIZE(ov772x_mode);
 
@@ -1486,29 +1483,6 @@ scan_next:
        } while (remaining_len > 0);
 }
 
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
-       struct sd *sd = (struct sd *) gspca_dev;
-
-       sd->ctrls[AGC].val = val;
-
-       /* the auto white balance control works only
-        * when auto gain is set */
-       if (val) {
-               gspca_dev->ctrl_inac &= ~(1 << AWB);
-       } else {
-               gspca_dev->ctrl_inac |= (1 << AWB);
-               if (sd->ctrls[AWB].val) {
-                       sd->ctrls[AWB].val = 0;
-                       if (gspca_dev->streaming)
-                               setawb(gspca_dev);
-               }
-       }
-       if (gspca_dev->streaming)
-               setagc(gspca_dev);
-       return gspca_dev->usb_err;
-}
-
 static int sd_querymenu(struct gspca_dev *gspca_dev,
                struct v4l2_querymenu *menu)
 {
index b5797308a39b365b2d5b075fc462efe9e2a1d977..1fd41f0d2e9514a408db394e9a613bc4ee2badd7 100644 (file)
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = reg_r(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
index 2cb7d95f7be7ef7c21d465ac36a4dbed3c86155e..115da169f32af62de40570cdb25f179abad21fba 100644 (file)
@@ -418,7 +418,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
        struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
 
        gspca_dev->vdev.ctrl_handler = hdl;
-       v4l2_ctrl_handler_init(hdl, 4);
+       v4l2_ctrl_handler_init(hdl, 5);
 
        sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                                        V4L2_CID_CONTRAST, 0, 15, 1, 7);
index ad098202d7f0fa086900d9bbde499615917f7b2f..b9c6f17eabb245fde118e3b198f163782d92a7bc 100644 (file)
@@ -1761,7 +1761,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_SATURATION, 0, 255, 1, 127);
        sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_HUE, -180, 180, 1, 0);
-       v4l2_ctrl_cluster(4, &sd->brightness);
 
        sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
        sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
-       v4l2_ctrl_cluster(2, &sd->blue);
 
        if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
            sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_HFLIP, 0, 1, 1, 0);
                sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_VFLIP, 0, 1, 1, 0);
-               v4l2_ctrl_cluster(2, &sd->hflip);
        }
 
        if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_GAIN, 0, 28, 1, 0);
                sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+       }
+
+       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+       if (hdl->error) {
+               pr_err("Could not initialize controls\n");
+               return hdl->error;
+       }
+
+       v4l2_ctrl_cluster(4, &sd->brightness);
+       v4l2_ctrl_cluster(2, &sd->blue);
+       if (sd->hflip)
+               v4l2_ctrl_cluster(2, &sd->hflip);
+       if (sd->autogain) {
                if (sd->sensor == SENSOR_SOI968)
                        /* this sensor doesn't have the exposure control and
                           autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        /* Otherwise autogain is clustered with exposure. */
                        v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
        }
-
-       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
-                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
-       if (hdl->error) {
-               pr_err("Could not initialize controls\n");
-               return hdl->error;
-       }
        return 0;
 }
 
@@ -2066,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
        set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
                        v4l2_ctrl_g_ctrl(sd->red));
-       set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
-       set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
-       set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
-                       v4l2_ctrl_g_ctrl(sd->vflip));
+       if (sd->gain)
+               set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+       if (sd->exposure)
+               set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+       if (sd->hflip)
+               set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+                               v4l2_ctrl_g_ctrl(sd->vflip));
 
        reg_w1(gspca_dev, 0x1007, 0x20);
        reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2172,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        int avg_lum;
 
-       if (!v4l2_ctrl_g_ctrl(sd->autogain))
+       if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
                return;
 
        avg_lum = atomic_read(&sd->avg_lum);
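Because not every sensor handled by sn9c20x creates the gain/exposure/hflip/autogain controls after this reorganisation, the hunks above guard each use with a NULL check before dereferencing the control pointer. A tiny sketch of that "optional feature pointer" guard, with a made-up ctrl struct rather than the V4L2 control framework:

#include <stdio.h>

struct ctrl {
        int val;
};

/* Return the control's value, treating a missing (NULL) control as "off". */
static int ctrl_value_or_off(const struct ctrl *c)
{
        return c ? c->val : 0;
}

int main(void)
{
        struct ctrl autogain = { 1 };

        printf("with control: %d\n", ctrl_value_or_off(&autogain));
        printf("without:      %d\n", ctrl_value_or_off(NULL));
        return 0;
}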
index 4d1696d1a7f4022d0cca06a90f40f6a912a5b877..f38faa9b37c3078270abdf408a29b65560e23fde 100644 (file)
@@ -3120,7 +3120,7 @@ static const struct sd_desc sd_desc = {
                        | (SENSOR_ ## sensor << 8) \
                        | (flags)
 static const struct usb_device_id device_table[] = {
-       {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+       {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
        {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
        {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
        {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
index 057929e165ab6f32fa875736f782d21ca8218c7f..5462ce2f60ea9dc647572a0ae93796c26f1b3775 100644 (file)
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
        pci_write_config_dword(pdev, 0x40, 0xffff);
 
        IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   pdev->device, pdev->revision, pdev->bus->number,
                   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
-                  pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+                  pdev->irq, pci_latency, (u64)itv->base_addr);
 
        return 0;
 }
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        itv->cxhdl.priv = itv;
        itv->cxhdl.func = ivtv_api_func;
 
-       IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+       IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
 
        /* PCI Device Setup */
        retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
                goto free_mem;
 
        /* map io memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
        itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
                                       IVTV_ENCODER_SIZE);
        if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        if (itv->has_cx23415) {
-               IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                               itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+               IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                               (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
                itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
                                IVTV_DECODER_SIZE);
                if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        /* map registers memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        itv->reg_mem =
            ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (!itv->reg_mem) {
index 2e220028aad2f383417904ce9d629d5bb4857d4e..a7e00f8938f818a1b2a0730fac2517f5005d65fb 100644 (file)
@@ -622,7 +622,7 @@ struct ivtv {
        struct v4l2_subdev *sd_video;   /* controlling video decoder subdev */
        struct v4l2_subdev *sd_audio;   /* controlling audio subdev */
        struct v4l2_subdev *sd_muxer;   /* controlling audio muxer subdev */
-       u32 base_addr;                  /* PCI resource base address */
+       resource_size_t base_addr;      /* PCI resource base address */
        volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
        volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
        volatile void __iomem *reg_mem; /* pointer to mapped registers */
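In the ivtv hunks above, base_addr becomes resource_size_t, so every debug print of it switches to %llx with an explicit (u64) cast; resource_size_t is 32 or 64 bits wide depending on the kernel configuration, and the cast keeps the format string valid either way. A user-space analogue of the same "cast to the widest type you print" rule, with my_addr_t standing in for resource_size_t:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for resource_size_t: could be 32 or 64 bits depending on build options. */
typedef uint64_t my_addr_t;

int main(void)
{
        my_addr_t base = 0xfebfe000u;

        /* Cast to the widest type the format expects, so the same printf
         * line is correct whether the typedef is 32- or 64-bit. */
        printf("base addr: 0x%llx\n", (unsigned long long)base);
        return 0;
}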
index d2dec585e61b8d3c8bd2940f220bcaaf21c668a9..3945556f573384f77fb19752ac05b4bfdba23464 100644 (file)
@@ -110,22 +110,6 @@ enum {
        V4L2_M2M_DST = 1,
 };
 
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
-       switch (type) {
-       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-               return &q_data[V4L2_M2M_SRC];
-       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-               return &q_data[V4L2_M2M_DST];
-       default:
-               BUG();
-       }
-       return NULL;
-}
-
 #define V4L2_CID_TRANS_TIME_MSEC       V4L2_CID_PRIVATE_BASE
 #define V4L2_CID_TRANS_NUM_BUFS                (V4L2_CID_PRIVATE_BASE + 1)
 
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
        int                     aborting;
 
        struct v4l2_m2m_ctx     *m2m_ctx;
+
+       /* Source and destination queue data */
+       struct m2mtest_q_data   q_data[2];
 };
 
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+                                        enum v4l2_buf_type type)
+{
+       switch (type) {
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+               return &ctx->q_data[V4L2_M2M_SRC];
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+               return &ctx->q_data[V4L2_M2M_DST];
+       default:
+               BUG();
+       }
+       return NULL;
+}
+
+
 static struct v4l2_queryctrl *get_ctrl(int id)
 {
        int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
        int tile_w, bytes_left;
        int width, height, bytesperline;
 
-       q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 
        width   = q_data->width;
        height  = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
 
        f->fmt.pix.width        = q_data->width;
        f->fmt.pix.height       = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
        if (!q_data)
                return -EINVAL;
 
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
        struct m2mtest_q_data *q_data;
        unsigned int size, count = *nbuffers;
 
-       q_data = get_q_data(vq->type);
+       q_data = get_q_data(ctx, vq->type);
 
        size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
 
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
 
        dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
 
-       q_data = get_q_data(vb->vb2_queue->type);
+       q_data = get_q_data(ctx, vb->vb2_queue->type);
 
        if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
                dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
        ctx->transtime = MEM2MEM_DEF_TRANSTIME;
        ctx->num_processed = 0;
 
+       ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+       ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
        ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
 
        if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
                goto err_m2m;
        }
 
-       q_data[V4L2_M2M_SRC].fmt = &formats[0];
-       q_data[V4L2_M2M_DST].fmt = &formats[0];
-
        return 0;
 
        v4l2_m2m_release(dev->m2m_dev);
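The m2mtest changes above move the file-scope q_data[2] array into struct m2mtest_ctx, so each opened context carries its own source/destination queue state instead of sharing one global pair, and get_q_data() gains a ctx argument. A stripped-down sketch of that ownership change; the struct layout is simplified and is not the driver's real one.

#include <stdio.h>

enum { SRC = 0, DST = 1 };

struct q_data {
        unsigned int width, height;
};

/* Before: one static q_data[2] shared by every open file handle.
 * After: each context owns its queue state, so two concurrent contexts
 * can hold different formats without clobbering each other. */
struct ctx {
        struct q_data q_data[2];
};

static struct q_data *get_q_data(struct ctx *c, int type)
{
        return &c->q_data[type];        /* was: &global_q_data[type] */
}

int main(void)
{
        struct ctx a = { .q_data = { [SRC] = { 640, 480 },  [DST] = { 320, 240 } } };
        struct ctx b = { .q_data = { [SRC] = { 1280, 720 }, [DST] = { 1280, 720 } } };

        printf("a src %ux%u, b src %ux%u\n",
               get_q_data(&a, SRC)->width, get_q_data(&a, SRC)->height,
               get_q_data(&b, SRC)->width, get_q_data(&b, SRC)->height);
        return 0;
}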
index ded26b7286faaea01bac1220dfc9c4ea8a0a273b..637bde8aca28e25c2799cd85aded6eedafa3075c 100644 (file)
@@ -83,6 +83,7 @@
 #define CSICR1_INV_DATA                (1 << 3)
 #define CSICR1_INV_PCLK                (1 << 2)
 #define CSICR1_REDGE           (1 << 1)
+#define CSICR1_FMT_MASK                (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
 
 #define SHIFT_STATFF_LEVEL     22
 #define SHIFT_RXFF_LEVEL       19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
        u32 src_pixel;
        u32 ch1_pixel;
        u32 irq_flags;
+       u32 csicr1;
 };
 
 /* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .ch1_pixel      = 0x2ca00565, /* RGB565 */
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
                                                PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+                       .csicr1         = 0,
                }
        },
        {
@@ -343,6 +346,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_PACK_DIR,
                }
        },
        {
@@ -356,6 +360,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_SWAP16_EN,
                }
        },
 };
@@ -984,7 +989,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx2_camera_dev *pcdev = ici->priv;
        struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-       const struct soc_camera_format_xlate *xlate;
        unsigned long common_flags;
        int ret;
        int bytesperline;
@@ -1029,24 +1033,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
                return ret;
        }
 
-       xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-       if (!xlate) {
-               dev_warn(icd->parent, "Format %x not found\n", pixfmt);
-               return -EINVAL;
-       }
-
-       if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
-               csicr1 |= CSICR1_PACK_DIR;
-               csicr1 &= ~CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "already yuyv format, don't convert\n");
-       } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               csicr1 &= ~CSICR1_PACK_DIR;
-               csicr1 |= CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
-       } else {
-               dev_warn(icd->parent, "mbus format not supported\n");
-               return -EINVAL;
-       }
+       csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
 
        if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
                csicr1 |= CSICR1_REDGE;
@@ -1155,18 +1142,6 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
                }
        }
 
-       if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               formats++;
-               if (xlate) {
-                       xlate->host_fmt =
-                               soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
-                       xlate->code     = code;
-                       dev_dbg(dev, "Providing host format %s for sensor code %d\n",
-                               xlate->host_fmt->name, code);
-                       xlate++;
-               }
-       }
-
        /* Generic pass-trough */
        formats++;
        if (xlate) {
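In the mx2_camera hunks the per-format CSICR1 bits (PACK_DIR / SWAP16_EN) move out of mx2_camera_set_bus_param() into the mx27_emma_prp_table entries, and the register is updated through a mask so only the format field changes. A small sketch of that table-driven mask-and-set pattern; the bit positions and table contents here are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PACK_DIR        (1u << 15)      /* illustrative bit positions */
#define SWAP16_EN       (1u << 14)
#define FMT_MASK        (PACK_DIR | SWAP16_EN)

struct fmt_cfg {
        const char *name;
        uint32_t csicr1;        /* format-specific bits kept in the table */
};

static const struct fmt_cfg table[] = {
        { "RGB565", 0 },
        { "YUYV",   PACK_DIR },
        { "UYVY",   SWAP16_EN },
};

/* Clear only the format field, then OR in the bits from the table entry. */
static uint32_t apply_fmt(uint32_t csicr1, const struct fmt_cfg *cfg)
{
        return (csicr1 & ~FMT_MASK) | cfg->csicr1;
}

int main(void)
{
        uint32_t csicr1 = 0x00000002;   /* pretend other bits are already set */
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                printf("%-6s -> 0x%08" PRIx32 "\n",
                       table[i].name, apply_fmt(csicr1, &table[i]));
        return 0;
}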
index 8a4935ecc655e9c114cd2f7defb51498f50d38c5..dd91da26f1b088f66bdb40b212fa9b1c84a46b5a 100644 (file)
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
                preview_config_contrast,
                NULL,
                offsetof(struct prev_params, contrast),
-               0, true,
+               0, 0, true,
        }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
                preview_config_brightness,
                NULL,
                offsetof(struct prev_params, brightness),
-               0, true,
+               0, 0, true,
        },
 };
 
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
        unsigned int elv = prev->crop.top + prev->crop.height - 1;
        u32 features;
 
-       if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+       if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
                sph -= 2;
                eph += 2;
                slv -= 2;
index af2d9086d7e8a6e26bbb2d14171b68ce19cf19b5..b4c679b3fb0f7a052550a7d5a71ea42367e49ceb 100644 (file)
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/isa.h>
 #include <asm/io.h>
index 354574591908ee9cbbea18a5c518e04e2b9b4839..725812aa0c3044f5ffa31190a734e75ffcc9c01b 100644 (file)
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
                if (pixm)
                        sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
                else
-                       sizes[i] = size;
+                       sizes[i] = max_t(u32, size, frame->payload[i]);
+
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
 
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
 static int fimc_capture_open(struct file *file)
 {
        struct fimc_dev *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
-
-       if (ret)
-               return ret;
+       int ret;
 
        dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-       /* Return if the corresponding video mem2mem node is already opened. */
        if (fimc_m2m_active(fimc))
                return -EBUSY;
 
        set_bit(ST_CAPT_BUSY, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               return ret;
 
-       if (++fimc->vid_cap.refcnt == 1) {
-               ret = fimc_pipeline_initialize(&fimc->pipeline,
-                              &fimc->vid_cap.vfd->entity, true);
-               if (ret < 0) {
-                       dev_err(&fimc->pdev->dev,
-                               "Video pipeline initialization failed\n");
-                       pm_runtime_put_sync(&fimc->pdev->dev);
-                       fimc->vid_cap.refcnt--;
-                       v4l2_fh_release(file);
-                       clear_bit(ST_CAPT_BUSY, &fimc->state);
-                       return ret;
-               }
-               ret = fimc_capture_ctrls_create(fimc);
+       ret = v4l2_fh_open(file);
+       if (ret)
+               return ret;
 
-               if (!ret && !fimc->vid_cap.user_subdev_api)
-                       ret = fimc_capture_set_default_format(fimc);
+       if (++fimc->vid_cap.refcnt != 1)
+               return 0;
+
+       ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                      &fimc->vid_cap.vfd->entity, true);
+       if (ret < 0) {
+               clear_bit(ST_CAPT_BUSY, &fimc->state);
+               pm_runtime_put_sync(&fimc->pdev->dev);
+               fimc->vid_cap.refcnt--;
+               v4l2_fh_release(file);
+               return ret;
        }
+       ret = fimc_capture_ctrls_create(fimc);
+
+       if (!ret && !fimc->vid_cap.user_subdev_api)
+               ret = fimc_capture_set_default_format(fimc);
+
        return ret;
 }
 
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_ctx *ctx = fimc->vid_cap.ctx;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        return fimc_fill_format(&ctx->d_frame, f);
 }
 
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
        struct v4l2_mbus_framefmt mf;
        struct fimc_fmt *ffmt = NULL;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
                fimc_capture_try_format(ctx, &pix->width, &pix->height,
                                        NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
        struct fimc_fmt *s_fmt = NULL;
        int ret, i;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
        if (vb2_is_busy(&fimc->vid_cap.vbq))
                return -EBUSY;
 
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
                pix->width  = mf->width;
                pix->height = mf->height;
        }
+
        fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
        for (i = 0; i < ff->fmt->colplanes; i++)
-               ff->payload[i] =
-                       (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+               ff->payload[i] = pix->plane_fmt[i].sizeimage;
 
        set_frame_bounds(ff, pix->width, pix->height);
        /* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
 {
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_pipeline *p = &fimc->pipeline;
+       struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
        int ret;
 
        if (fimc_capture_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
-                                   p->m_pipeline);
+       ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        if (fimc->vid_cap.user_subdev_api) {
                ret = fimc_pipeline_validate(fimc);
-               if (ret)
+               if (ret < 0) {
+                       media_entity_pipeline_stop(&sd->entity);
                        return ret;
+               }
        }
        return vb2_streamon(&fimc->vid_cap.vbq, type);
 }
index fedcd561ba27f37122caa01adf2da1199c3db536..a4646ca1d56f31fb30eb35d7cd9829fa5aa9f46a 100644 (file)
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+               .name           = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+               .name           = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV420M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 3,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+               .name           = "YUV 4:2:0 non-contig. 2p, tiled",
                .fourcc         = V4L2_PIX_FMT_NV12MT,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -615,7 +615,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
        ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
 
        if (!handler->error) {
-               v4l2_ctrl_cluster(3, &ctrls->colorfx);
+               v4l2_ctrl_cluster(2, &ctrls->colorfx);
                ctrls->ready = true;
        }
 
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
        if (!ctrls->ready)
                return;
 
-       mutex_lock(&ctrls->handler.lock);
+       mutex_lock(ctrls->handler.lock);
        v4l2_ctrl_activate(ctrls->rotate, active);
        v4l2_ctrl_activate(ctrls->hflip, active);
        v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
                ctx->hflip    = 0;
                ctx->vflip    = 0;
        }
-       mutex_unlock(&ctrls->handler.lock);
+       mutex_unlock(ctrls->handler.lock);
 }
 
 /* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
        pix->width = width;
 
        for (i = 0; i < pix->num_planes; ++i) {
-               u32 bpl = pix->plane_fmt[i].bytesperline;
-               u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+               struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+               u32 bpl = plane_fmt->bytesperline;
 
                if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
                        bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
                if (i == 0) /* Same bytesperline for each plane. */
                        bytesperline = bpl;
 
-               pix->plane_fmt[i].bytesperline = bytesperline;
-               *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+               plane_fmt->bytesperline = bytesperline;
+               plane_fmt->sizeimage = max((pix->width * pix->height *
+                                  fmt->depth[i]) / 8, plane_fmt->sizeimage);
        }
 }
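fimc_adjust_mplane_format() above now takes the larger of the computed plane size and whatever sizeimage the caller passed in, and queue_setup() likewise clamps buffer sizes to the stored payload; this matters when the real payload (a JPEG frame, for instance) is bigger than width*height*depth/8. A compact sketch of that plane-size rule, with example depths and dimensions:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

/* Plane size: what the format math gives, but never smaller than the
 * size the caller requested. */
static unsigned int plane_size(unsigned int width, unsigned int height,
                               unsigned int depth_bits, unsigned int requested)
{
        return max_u((width * height * depth_bits) / 8, requested);
}

int main(void)
{
        /* 640x480, 8 bpp luma plane, caller asks for a larger JPEG-sized buffer */
        printf("%u\n", plane_size(640, 480, 8, 400000));
        /* no special request: the computed size wins */
        printf("%u\n", plane_size(640, 480, 8, 0));
        return 0;
}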
 
index 400d701aef04126d8d3b469a51b7a9adb360985c..74ff310db30cd6755393b5b6eaf2a933495fa0e9 100644 (file)
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
 static int fimc_lite_open(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        set_bit(ST_FLITE_IN_USE, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               goto done;
 
-       if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
-               return ret;
+       ret = v4l2_fh_open(file);
+       if (ret < 0)
+               goto done;
 
-       ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
-                                      true);
-       if (ret < 0) {
-               v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
-               pm_runtime_put_sync(&fimc->pdev->dev);
-               fimc->ref_count--;
-               v4l2_fh_release(file);
-               clear_bit(ST_FLITE_IN_USE, &fimc->state);
-       }
+       if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+               ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                              &fimc->vfd->entity, true);
+               if (ret < 0) {
+                       pm_runtime_put_sync(&fimc->pdev->dev);
+                       fimc->ref_count--;
+                       v4l2_fh_release(file);
+                       clear_bit(ST_FLITE_IN_USE, &fimc->state);
+               }
 
-       fimc_lite_clear_event_counters(fimc);
+               fimc_lite_clear_event_counters(fimc);
+       }
+done:
+       mutex_unlock(&fimc->lock);
        return ret;
 }
 
 static int fimc_lite_close(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
                clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
        if (fimc->ref_count == 0)
                vb2_queue_release(&fimc->vb_queue);
 
-       return v4l2_fh_release(file);
+       ret = v4l2_fh_release(file);
+
+       mutex_unlock(&fimc->lock);
+       return ret;
 }
 
 static unsigned int fimc_lite_poll(struct file *file,
                                   struct poll_table_struct *wait)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_poll(&fimc->vb_queue, file, wait);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return POLL_ERR;
+
+       ret = vb2_poll(&fimc->vb_queue, file, wait);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_mmap(&fimc->vb_queue, vma);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
+
+       ret = vb2_mmap(&fimc->vb_queue, vma);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
        if (fimc_lite_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        ret = fimc_pipeline_validate(fimc);
        if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
                return 0;
 
        ret = fimc_lite_stop_capture(fimc, suspend);
-       if (ret)
+       if (ret < 0 || !fimc_lite_active(fimc))
                return ret;
 
        return fimc_pipeline_shutdown(&fimc->pipeline);
index 6753c45631b856e1a06d19492e56e97edf01f9f6..52cef4865423ef2be451df098d0c2ada3412fe21 100644 (file)
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
 
 int fimc_pipeline_shutdown(struct fimc_pipeline *p)
 {
-       struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+       struct media_entity *me;
        int ret;
 
+       if (!p || !p->subdevs[IDX_SENSOR])
+               return -EINVAL;
+
+       me = &p->subdevs[IDX_SENSOR]->entity;
        mutex_lock(&me->parent->graph_mutex);
        ret = __fimc_pipeline_shutdown(p);
        mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
  * @source: the source entity to create links to all fimc entities from
  * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
  * @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
  */
 static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                                            struct media_entity *source,
                                            struct v4l2_subdev *sensor,
-                                           int pad, int fimc_id)
+                                           int pad, int link_mask)
 {
        struct fimc_sensor_info *s_info;
        struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc[i]->variant->has_cam_if)
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
 
                sink = &fmd->fimc[i]->vid_cap.subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc_lite[i])
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+                       flags = MEDIA_LNK_FL_ENABLED;
+               else
+                       flags = 0;
 
                sink = &fmd->fimc_lite[i]->subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
        struct s5p_fimc_isp_info *pdata;
        struct fimc_sensor_info *s_info;
        struct media_entity *source, *sink;
-       int i, pad, fimc_id = 0;
-       int ret = 0;
-       u32 flags;
+       int i, pad, fimc_id = 0, ret = 0;
+       u32 flags, link_mask = 0;
 
        for (i = 0; i < fmd->num_sensors; i++) {
                if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                if (source == NULL)
                        continue;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
-       fimc_id = 0;
        for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
                if (fmd->csis[i].sd == NULL)
                        continue;
                source = &fmd->csis[i].sd->entity;
                pad = CSIS_PAD_SOURCE;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
        /* Create immutable links between each FIMC's subdev and video node */
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
 }
 
 static int __fimc_md_set_camclk(struct fimc_md *fmd,
-                                        struct fimc_sensor_info *s_info,
-                                        bool on)
+                               struct fimc_sensor_info *s_info,
+                               bool on)
 {
        struct s5p_fimc_isp_info *pdata = s_info->pdata;
        struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
        if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
                return -EINVAL;
 
-       if (s_info->clk_on == on)
-               return 0;
        camclk = &fmd->camclk[pdata->clk_id];
 
-       dbg("camclk %d, f: %lu, clk: %p, on: %d",
-           pdata->clk_id, pdata->clk_frequency, camclk, on);
+       dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+           pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
 
        if (on) {
                if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
                        clk_set_rate(camclk->clock, pdata->clk_frequency);
                        camclk->frequency = pdata->clk_frequency;
                        ret = clk_enable(camclk->clock);
+                       dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+                           clk_get_rate(camclk->clock));
                }
-               s_info->clk_on = 1;
-               dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
-                   clk_get_rate(camclk->clock));
-
                return ret;
        }
 
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
 
        if (--camclk->use_count == 0) {
                clk_disable(camclk->clock);
-               s_info->clk_on = 0;
                dbg("Disabled camclk %d", pdata->clk_id);
        }
        return ret;
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
  * devices to which sensors can be attached, either directly or through
  * the MIPI CSI receiver. The clock is allowed here to be used by
  * multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
  * This function should only be called when the graph mutex is held.
  */
 int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
index 3b8a3492a17671fc86658db8eb410861e8cba8f2..1f5dbaff5442a7df686b4e6dc023952b8436939f 100644 (file)
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
  * @pdata: sensor's atrributes passed as media device's platform data
  * @subdev: image sensor v4l2 subdev
  * @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
  *
  * This data structure applies to image sensor and the writeback subdevs.
  */
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
        struct s5p_fimc_isp_info *pdata;
        struct v4l2_subdev *subdev;
        struct fimc_dev *host;
-       bool clk_on;
 };
 
 /**
index 053a8a872fd743ca1f8e3cae687f827247b9453e..a19bece41ba9d0c4a53845943c8219e27659382b 100644 (file)
                                                                decoded pic */
 #define S5P_FIMV_SI_DISPLAY_Y_ADR      0x2010 /* luma addr of displayed pic */
 #define S5P_FIMV_SI_DISPLAY_C_ADR      0x2014 /* chroma addrof displayed pic */
+
 #define S5P_FIMV_SI_CONSUMED_BYTES     0x2018 /* Consumed number of bytes to
                                                        decode a frame */
 #define S5P_FIMV_SI_DISPLAY_STATUS     0x201c /* status of decoded picture */
 
+#define S5P_FIMV_SI_DECODE_Y_ADR       0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR       0x2028 /* chroma addrof decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS      0x202c /* status of decoded picture */
+
 #define S5P_FIMV_SI_CH0_SB_ST_ADR      0x2044 /* start addr of stream buf */
 #define S5P_FIMV_SI_CH0_SB_FRM_SIZE    0x2048 /* size of stream buf */
 #define S5P_FIMV_SI_CH0_DESC_ADR       0x204c /* addr of descriptor buf */
index c25ec022d2678f734e46b1f1f3b5b0d496150c8d..feea867f318c25a6bce189d2bf6e95fb91a42422 100644 (file)
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
-               ctx->loop_filter_mpeg4 = ctrl->val;
+               ctx->display_delay = ctrl->val;
                break;
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
                ctx->display_delay_enable = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
-               ctx->display_delay = ctrl->val;
+               ctx->loop_filter_mpeg4 = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
                ctx->slice_interface = ctrl->val;
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
 
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_dec_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
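Both s5p-mfc hunks add a memset() of the v4l2_ctrl_config that is refilled on every loop iteration, so fields set for one custom control cannot leak into the next one. The general pattern, sketched with a made-up config struct instead of the V4L2 one:

#include <stdio.h>
#include <string.h>

struct ctrl_config {
        int id;
        int min, max, def;
        const char *const *menu;        /* only some controls set this */
};

static void register_ctrl(const struct ctrl_config *cfg)
{
        printf("id %d: min %d max %d def %d menu %s\n",
               cfg->id, cfg->min, cfg->max, cfg->def,
               cfg->menu ? "yes" : "no");
}

int main(void)
{
        static const char *const delay_menu[] = { "off", "on", NULL };
        struct ctrl_config cfg;
        int i;

        for (i = 0; i < 3; i++) {
                /* Zero the whole struct before reuse; otherwise the menu
                 * pointer set in iteration 0 would survive into 1 and 2. */
                memset(&cfg, 0, sizeof(cfg));
                cfg.id = 100 + i;
                cfg.max = 10 * (i + 1);
                if (i == 0)
                        cfg.menu = delay_menu;
                register_ctrl(&cfg);
        }
        return 0;
}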
index acedb2004be325e541928cf1f567c525d9a5ffa6..158b78989b89dc43dd7ced55dec2ca91a6baa28e 100644 (file)
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
                .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
                .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
                .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-               .menu_skip_mask = ~(
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
-                               ),
        },
        {
                .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .menu_skip_mask = 0,
        },
        {
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .menu_skip_mask = 0,
        },
        {
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
                        mfc_err("failed to try output format\n");
                        return -EINVAL;
                }
+               v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+                       &pix_fmt_mp->height, 4, 1080, 1, 0);
        } else {
                mfc_err("invalid buf type\n");
                return -EINVAL;
@@ -1777,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
        }
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_enc_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index db83836e6a9fc05b98a11d1edf7a267c74b99b53..5932d1c782c5dba36e18b55a6a3dcf6cda9f77da 100644 (file)
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
                                        S5P_FIMV_SI_DISPLAY_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dec_y_adr()                (readl(dev->regs_base + \
-                                       S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+                                       S5P_FIMV_SI_DECODE_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dspl_status()      readl(dev->regs_base + \
                                                S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status()       readl(dev->regs_base + \
+                                               S5P_FIMV_SI_DECODE_STATUS)
 #define s5p_mfc_get_frame_type()       (readl(dev->regs_base + \
                                                S5P_FIMV_DECODE_FRAME_TYPE) \
                                        & S5P_FIMV_DECODE_FRAME_MASK)
index 764eac6bcc4c91d732880b31f024389f03fde692..cf962a4662766db5b1888f01c7e910fe245447c6 100644 (file)
@@ -13,8 +13,7 @@
 #ifndef S5P_MFC_SHM_H_
 #define S5P_MFC_SHM_H_
 
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
        EXTENEDED_DECODE_STATUS = 0x00, /* D */
        SET_FRAME_TAG           = 0x04, /* D */
        GET_FRAME_TAG_TOP       = 0x08, /* D */
index f7b35ff443bf976d192ee5522bbd765beb1a04cb..fb99ff18be077255810f43e71fd0a9163ee883b5 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_SMIAPP
        tristate "SMIA++/SMIA sensor support"
-       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
        select VIDEO_SMIAPP_PLL
        ---help---
          This is a generic driver for SMIA++/SMIA camera modules.
index f518026cb67b6e4099522b75ccdc12eeff5f8f24..9cf5bda35fbe1cfe332e8b18ae3d9218b3e8f54b 100644 (file)
@@ -31,7 +31,9 @@
 #include <linux/device.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/v4l2-mediabus.h>
 #include <media/v4l2-device.h>
 
index 3e050e12153b3522d67c701c3187dcfd6b037536..1ad5ab6ce5cf9ea46187d2ac03224033957b1247 100644 (file)
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
                return 0;
        if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (t->mode != V4L2_TUNER_RADIO) {
+       if (vt->type != V4L2_TUNER_RADIO) {
                vt->capability |= V4L2_TUNER_CAP_NORM;
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
index 5ccbd4629f9c34eb48322e29a5c0dc3aec668a4b..0cbada18f6f57376d980d34345335279499fec03 100644 (file)
@@ -656,7 +656,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
-       if (ops->vidioc_g_parm || vdev->current_norm)
+       if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
                set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
        SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
        SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
        SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
        SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
        /* yes, really vidioc_subscribe_event */
        SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
        SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
index 91be4e871f43644c2052c942092a3f808688c9a8..d7fa8962d8b3129940514cb487d0d75a8931dbdd 100644 (file)
@@ -1680,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
                                break;
 
                        ret = 0;
+                       p->parm.capture.readbuffers = 2;
                        if (ops->vidioc_g_std)
                                ret = ops->vidioc_g_std(file, fh, &std);
                        if (ret == 0)
index 4d7391ec80013279d965db1070f978f3948cf241..aae1720b2f2d14a0ceb1ca1080fafdb125d28fc4 100644 (file)
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
        } else if (vino_drvdata->decoder
                   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
                int input;
-               int data_norm;
+               int data_norm = 0;
                v4l2_std_id norm;
 
                input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
                }
 
                if (vino_drvdata->decoder_owner == vcs->channel) {
-                       int data_norm;
+                       int data_norm = 0;
                        v4l2_std_id norm;
 
                        ret = decoder_call(video, s_routing,
index 0960d7f0d3947a7fa4b6ae727b5ca55739b2b854..08c10240e70fba4063f2abd8cab3904b1f935a4b 100644 (file)
@@ -1149,10 +1149,14 @@ static ssize_t
 vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
        struct vivi_dev *dev = video_drvdata(file);
+       int err;
 
        dprintk(dev, 1, "read called\n");
-       return vb2_read(&dev->vb_vidq, data, count, ppos,
+       mutex_lock(&dev->mutex);
+       err = vb2_read(&dev->vb_vidq, data, count, ppos,
                       file->f_flags & O_NONBLOCK);
+       mutex_unlock(&dev->mutex);
+       return err;
 }
 
 static unsigned int
index a5c591ffe395d01b6e7e09f927c03f3048688620..d99db5623acf45039f53dd9eb7e6b9620d88735d 100644 (file)
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
        unsigned long    port;
        u32              msize;
        u32              psize;
-       u8               revision;
        int              r = -ENODEV;
        struct pci_dev *pdev;
 
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
                return r;
        }
 
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
        if (sizeof(dma_addr_t) > 4) {
                const uint64_t required_mask = dma_get_required_mask
                    (&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        MPT_ADAPTER     *ioc;
        u8               cb_idx;
        int              r = -ENODEV;
-       u8               revision;
        u8               pcixcmd;
        static int       mpt_ids = 0;
 #ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
            ioc->name, &ioc->facts, &ioc->pfacts[0]));
 
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-       mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+       mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+                            ioc->prod_name);
 
        switch (pdev->device)
        {
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                break;
 
        case MPI_MANUFACTPAGE_DEVICEID_FC929X:
-               if (revision < XL_929) {
+               if (pdev->revision < XL_929) {
                        /* 929X Chip Fix. Set Split transactions level
                        * for PCIX. Set MOST bits to zero.
                        */
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                /* 1030 Chip Fix. Disable Split transactions
                 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
                 */
-               if (revision < C0_1030) {
+               if (pdev->revision < C0_1030) {
                        pci_read_config_byte(pdev, 0x6a, &pcixcmd);
                        pcixcmd &= 0x8F;
                        pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
                                printk(MYIOC_s_INFO_FMT "%s: host reset in"
                                        " progress mpt_config timed out.!!\n",
                                        __func__, ioc->name);
+                               mutex_unlock(&ioc->mptbase_cmds.mutex);
                                return -EFAULT;
                        }
                        spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
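The mpt_config() hunk above adds the mutex_unlock() that was missing on the "host reset in progress" early return, so the command mutex is released on every exit path. A common way to keep that invariant is a single unlock label that all failure returns funnel through; here is a sketch of that pattern with a pthread mutex standing in for the kernel one.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmd_mutex = PTHREAD_MUTEX_INITIALIZER;

static int do_config(int reset_in_progress)
{
        int ret = 0;

        pthread_mutex_lock(&cmd_mutex);

        if (reset_in_progress) {
                ret = -EFAULT;          /* early failure...              */
                goto out_unlock;        /* ...still releases the mutex   */
        }

        /* normal command handling would go here */

out_unlock:
        pthread_mutex_unlock(&cmd_mutex);
        return ret;
}

int main(void)
{
        /* both calls return with the mutex released, so the second cannot deadlock */
        printf("%d %d\n", do_config(1), do_config(0));
        return 0;
}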
index 6e6e16aab9dae36e8a1d736ba436c3e0431ca749..b383b6961e59549c8075b197d89adb6417b252cf 100644 (file)
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
        int                     iocnum;
        unsigned int            port;
        int                     cim_rev;
-       u8                      revision;
        struct scsi_device      *sdev;
        VirtDevice              *vdevice;
 
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
        pdev = (struct pci_dev *) ioc->pcidev;
 
        karg->pciId = pdev->device;
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-       karg->hwRev = revision;
+       karg->hwRev = pdev->revision;
        karg->subSystemDevice = pdev->subsystem_device;
        karg->subSystemVendor = pdev->subsystem_vendor;
 
index e129c820df7da7d6430e62891558f92cdbe235ab..92144ed1ad469d8257eab6636c4d7f8cc4c65180 100644 (file)
@@ -286,6 +286,7 @@ config TWL6040_CORE
        depends on I2C=y && GENERIC_HARDIRQS
        select MFD_CORE
        select REGMAP_I2C
+       select IRQ_DOMAIN
        default n
        help
          Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644 (file)
index 63b30b1..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
-       u8 nranges;
-       u8 bankid;
-       const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
-       u8 nbanks;
-       const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
-       u8 slave_addr;
-       const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
-       [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
-               AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
-       [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
-               AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
-       [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
-       [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
-       [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
-       [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
-       [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
-       [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
-       [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
-       [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
-       [AB5500_BANK_FG_BATTCOM_ACC] = {
-               AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
-       [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
-       [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
-       [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
-       [AB5500_BANK_AUDIO_HEADSETUSB] = {
-               AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
-       u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
-       u8 reg, u8 bitmask, u8 bitvalues);
index 671c8bc14bbcb0bf3c91c450b14af6c0c34b4f19..50e83dc5dc49b7520dfab72c52c80bb02e485f4c 100644 (file)
@@ -2735,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
        REGULATOR_SUPPLY("vcore", "uart2"),
        REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
        REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+       REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
 };
 
 static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
index 3fcdab3eb8eb67fb02a08c02166613e11742924c..03df422feb763ab075ac5fa741ef09b71c066d1d 100644 (file)
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
        .reg_bits = 7,
        .pad_bits = 1,
        .val_bits = 24,
+       .write_flag_mask = 0x80,
 
        .max_register = MC13XXX_NUMREGS,
 
        .cache_type = REGCACHE_NONE,
+       .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+                               void *val, size_t val_size)
+{
+       unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+       unsigned char r[4];
+       unsigned char *p = val;
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+       struct spi_transfer t = {
+               .tx_buf = w,
+               .rx_buf = r,
+               .len = 4,
+       };
+
+       struct spi_message m;
+       int ret;
+
+       if (val_size != 3 || reg_size != 1)
+               return -ENOTSUPP;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       ret = spi_sync(spi, &m);
+
+       memcpy(p, &r[1], 3);
+
+       return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       if (count != 4)
+               return -ENOTSUPP;
+
+       return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use the generic regmap-spi bus implementation here.
+ * The MC13783 chip will get corrupted if the CS signal is deasserted,
+ * and on the i.MX31 SoC (the target SoC for the MC13783 PMIC) the SPI
+ * controller has the following erratum (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+       .write = mc13xxx_spi_write,
+       .read = mc13xxx_spi_read,
 };
 
 static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, mc13xxx);
        spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-       spi->bits_per_word = 32;
 
        mc13xxx->dev = &spi->dev;
        mutex_init(&mc13xxx->lock);
 
-       mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+       mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+                                       &mc13xxx_regmap_spi_config);
+
        if (IS_ERR(mc13xxx->regmap)) {
                ret = PTR_ERR(mc13xxx->regmap);
                dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
index 7e96bb2297244f7946537a5da5f78226e6a4db40..41088ecbb2a92e3c6350038ad61f77ceb500e8ce 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
+#include <linux/gpio.h>
 #include <plat/cpu.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
        dev_dbg(dev, "starting TI HSUSB Controller\n");
 
        pm_runtime_get_sync(dev);
-       spin_lock_irqsave(&omap->lock, flags);
 
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+               /* Hold the PHY in RESET for enough time till DIR is high */
+               udelay(10);
+       }
+
+       spin_lock_irqsave(&omap->lock, flags);
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
        }
 
        spin_unlock_irqrestore(&omap->lock, flags);
+
+       if (pdata->ehci_data->phy_reset) {
+               /* Hold the PHY in RESET for enough time till
+                * PHY is settled and ready
+                */
+               udelay(10);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
+       }
+
        pm_runtime_put_sync(dev);
 }
 
+static void omap_usbhs_deinit(struct device *dev)
+{
+       struct usbhs_hcd_omap           *omap = dev_get_drvdata(dev);
+       struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+       }
+}
+
 
 /**
  * usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        goto end_probe;
 
 err_alloc:
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
 
 err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
 {
        struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
 
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
        iounmap(omap->uhh_base);
        clk_put(omap->init_60m_fclk);
index 00c0aba7eba000de65742b04014419e0115c6eba..c4a69f193a1df1985abfbaeeffb8e39cda933493 100644 (file)
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                }
        }
 
-       ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+       /* Change IRQ into clear on read mode for efficiency */
+       slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+       addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+       reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+       regmap_write(palmas->regmap[slave], addr, reg);
+
+       ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
                        IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
                        &palmas->irq_data);
        if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                goto err;
        }
 
+       children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+       children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
        ret = mfd_add_devices(palmas->dev, -1,
                              children, ARRAY_SIZE(palmas_children),
                              NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
        { "twl6035", },
        { "twl6037", },
        { "tps65913", },
+       { /* end */ }
 };
 MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
 
index 373f423b118164dbe1058fcba74694d4122c1527..947a06a1845f601980cc1d730decdc8129c1ad7e 100644 (file)
@@ -6,7 +6,7 @@
  *
  * License Terms: GNU General Public License, version 2
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/i2c.h>
index afd459013ecbb1e89485cf37f643f6d7d3c91a20..9edfe864cc056b5cd543fd696d63a325cda34274 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) ST Microelectronics SA 2011
  *
  * License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index 93936f1b75eb1d89ac47a75284eebc3f1504df15..23f5463d4cae432e5e1b65ac97e5ba6042a2c59e 100644 (file)
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev,     s32 *slots,
                        struct mei_cl *cl,
                        struct mei_io_list *cmpl_list)
 {
-       if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+       if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
                        sizeof(struct hbm_flow_control))) {
                /* return the cancel routine */
                list_del(&cb_pos->cb_list);
index c70333228337d28bc76d1c61510bb57b615d3a68..783fcd7365bc1e770739db884bba4a5088479e22 100644 (file)
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                       pdev->irq);
-               goto unmap_memory;
+               goto disable_msi;
        }
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
        mei_disable_interrupts(dev);
        flush_scheduled_work();
        free_irq(pdev->irq, dev);
+disable_msi:
        pci_disable_msi(pdev);
-unmap_memory:
        pci_iounmap(pdev, dev->mem_addr);
 free_device:
        kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
+
+       misc_deregister(&mei_misc_device);
 }
 #ifdef CONFIG_PM
 static int mei_pci_suspend(struct device *device)
@@ -1145,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
  */
 static void __exit mei_exit_module(void)
 {
-       misc_deregister(&mei_misc_device);
        pci_unregister_driver(&mei_driver);
 
        pr_debug("unloaded successfully.\n");
index 6be5605707b46036c433a7e8e441eb8b4ee1ccca..e2ec0505eb5c05458568f8d12a7cdd6d3da9939d 100644 (file)
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
 };
 static const struct watchdog_info wd_info = {
                .identity = INTEL_AMT_WATCHDOG_ID,
-               .options = WDIOF_KEEPALIVEPING,
+               .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
 };
 
 static struct watchdog_device amt_wd_dev = {
index 17bbacb1b4b131b119b0076b339f44012fcc51f7..87b251ab6ec582f2c8177687d0b330d97110fb50 100644 (file)
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                if (msg->activate_gru_mq_desc_gpa !=
                    part_uv->activate_gru_mq_desc_gpa) {
-                       spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                       spin_lock(&part_uv->flags_lock);
                        part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-                       spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                       spin_unlock(&part_uv->flags_lock);
                        part_uv->activate_gru_mq_desc_gpa =
                            msg->activate_gru_mq_desc_gpa;
                }
index dd2d374dcc7aa43363a5366350f79c6a17e81a70..276d21ce6bc1ba6a18c844258331d6417f6278d7 100644 (file)
@@ -554,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
-       unsigned int timeout_us;
 
        struct scatterlist sg;
 
@@ -574,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-       data.timeout_ns = card->csd.tacc_ns * 100;
-       data.timeout_clks = card->csd.tacc_clks * 100;
-
-       timeout_us = data.timeout_ns / 1000;
-       timeout_us += data.timeout_clks * 1000 /
-               (card->host->ios.clock / 1000);
-
-       if (timeout_us > 100000) {
-               data.timeout_ns = 100000000;
-               data.timeout_clks = 0;
-       }
-
        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
+       mmc_set_data_timeout(&data, card);
 
        mrq.cmd = &cmd;
        mrq.data = &data;
index f13e38deceac760fcbd9ae4cd2a1fdf7e5d79671..8f5dc08d65989d8526f8bc59d1ed91426e93108d 100644 (file)
@@ -50,8 +50,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
                goto egpioreq;
 
        ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
-                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                  cd->label, host);
+                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                  IRQF_ONESHOT, cd->label, host);
        if (ret < 0)
                goto eirqreq;
 
index 2d4a4b74675060133fecd7021fa2d774e5f303c9..4f4489aa6baede795ae21c222e0aa9e99e7b5800 100644 (file)
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
                                 card->ext_csd.generic_cmd6_time);
        }
 
-       if (err)
-               pr_err("%s: power class selection for ext_csd_bus_width %d"
-                      " failed\n", mmc_hostname(card->host), bus_width);
-
        return err;
 }
 
@@ -1104,7 +1100,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
                err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
                if (err)
-                       goto err;
+                       pr_warning("%s: power class selection to bus width %d"
+                                  " failed\n", mmc_hostname(card->host),
+                                  1 << bus_width);
        }
 
        /*
@@ -1136,7 +1134,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1165,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d ddr %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width, ddr);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1326,7 +1330,7 @@ static int mmc_suspend(struct mmc_host *host)
                if (!err)
                        mmc_card_set_sleep(host->card);
        } else if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
        mmc_release_host(host);
 
index c272c6868ecf6d11a39c3cf2be99f1257fccd026..b2b43f624b9edd13a8ad4d836ec2229636de2a9c 100644 (file)
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
  */
 static int mmc_sd_suspend(struct mmc_host *host)
 {
+       int err = 0;
+
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
        if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~MMC_STATE_HIGHSPEED;
        mmc_release_host(host);
 
-       return 0;
+       return err;
 }
 
 /*
index 13d0e95380ab8f73d601060c3415b82bb253d12b..41c5fd8848f4d72c171d9e001b2171607e13eb83 100644 (file)
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
        if (ret)
                return ret;
 
+       if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+               pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+                          mmc_hostname(card->host), ctrl);
+
+       /* set as 4-bit bus width */
+       ctrl &= ~SDIO_BUS_WIDTH_MASK;
        ctrl |= SDIO_BUS_WIDTH_4BIT;
 
        ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
index 787aba1682bb362efa7f06baf16370f4551f2074..ab56f7db53150e907c406daff62be29fd5686cec 100644 (file)
 #define atmci_writel(port,reg,value)                   \
        __raw_writel((value), (port)->regs + reg)
 
+/*
+ * Fix sconfig's burst size according to the Atmel MCI encoding. We need to
+ * convert the values as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+       if (maxburst > 1)
+               return fls(maxburst) - 2;
+       else
+               return 0;
+}
+
 #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
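An aside on the encoding above: the CHKSIZE field is simply the position of the burst size's most significant set bit, offset so that a burst of 1 maps to 0. A minimal userspace sketch (hypothetical test code, not part of this patch) that checks the 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 mapping:

	#include <assert.h>
	#include <strings.h>

	/* Userspace restatement of the mapping; for powers of two ffs() == fls(). */
	static unsigned int convert_chksize(unsigned int maxburst)
	{
		return maxburst > 1 ? (unsigned int)ffs((int)maxburst) - 2 : 0;
	}

	int main(void)
	{
		assert(convert_chksize(1) == 0);
		assert(convert_chksize(4) == 1);
		assert(convert_chksize(8) == 2);
		assert(convert_chksize(16) == 3);
		return 0;
	}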
index 420aca642b14ba42a6fa7fe014cc865e49445b73..f2c115e064387715a3c2f2cf796c5ae8b70bddd0 100644 (file)
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        enum dma_data_direction         direction;
        enum dma_transfer_direction     slave_dirn;
        unsigned int                    sglen;
+       u32                             maxburst;
        u32 iflags;
 
        data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        if (!chan)
                return -ENODEV;
 
-       if (host->caps.has_dma)
-               atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
        if (data->flags & MMC_DATA_READ) {
                direction = DMA_FROM_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+               maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
        } else {
                direction = DMA_TO_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+               maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
        }
 
+       atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
        sglen = dma_map_sg(chan->device->dev, data->sg,
                        data->sg_len, direction);
 
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
+       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
        /* We need at least one slot to succeed */
        nr_slots = 0;
        ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
                }
        }
 
-       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
-
        dev_info(&pdev->dev,
                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
                        host->mapbase, irq, nr_slots);
index 9bbf45f8c538ade0990444c0c5d63614ddc09c3f..1ca5e72ceb651e84544a8cc451698ea54de6aafd 100644 (file)
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
        p->des3 = host->sg_dma;
        p->des0 = IDMAC_DES0_ER;
 
+       mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
                   SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
        u32 div;
 
        if (slot->clock != host->current_speed) {
-               if (host->bus_hz % slot->clock)
+               div = host->bus_hz / slot->clock;
+               if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
-                       div = ((host->bus_hz / slot->clock) >> 1) + 1;
-               else
-                       div = (host->bus_hz  / slot->clock) >> 1;
+                       div += 1;
+
+               div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
 
                dev_info(&slot->mmc->class_dev,
                         "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
                        mdelay(20);
 
                if (cmd->data) {
-                       host->data = NULL;
                        dw_mci_stop_dma(host);
+                       host->data = NULL;
                }
        }
 }
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
-               set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
                host->dma_ops->complete(host);
        }
 #endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
 
 #ifdef CONFIG_MMC_DW_IDMAC
                                ctrl = mci_readl(host, BMOD);
-                               ctrl |= 0x01; /* Software reset of DMA */
+                               /* Software reset of DMA */
+                               ctrl |= SDMMC_IDMAC_SWRESET;
                                mci_writel(host, BMOD, ctrl);
 #endif
 
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
        spin_lock_init(&host->lock);
        INIT_LIST_HEAD(&host->queue);
 
-
-       host->dma_ops = host->pdata->dma_ops;
-       dw_mci_init_dma(host);
-
        /*
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
        }
 
        /* Reset all blocks */
-       if (!mci_wait_reset(&host->dev, host)) {
-               ret = -ENODEV;
-               goto err_dmaunmap;
-       }
+       if (!mci_wait_reset(&host->dev, host))
+               return -ENODEV;
+
+       host->dma_ops = host->pdata->dma_ops;
+       dw_mci_init_dma(host);
 
        /* Clear the interrupts for the host controller */
        mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
        if (host->vmmc)
                regulator_enable(host->vmmc);
 
-       if (host->dma_ops->init)
-               host->dma_ops->init(host);
-
        if (!mci_wait_reset(&host->dev, host)) {
                ret = -ENODEV;
                return ret;
        }
 
+       if (host->dma_ops->init)
+               host->dma_ops->init(host);
+
        /* Restore the old value at FIFOTH register */
        mci_writel(host, FIFOTH, host->fifoth_val);
 
index f0fcce40cd8daa27a6a10e44bb28ad9abcf5624d..50ff19a6236829b3143174b1372e141b86fb58fa 100644 (file)
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
        int bus_width = 0;
 
        pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
-       if (!pdata->gpio_wp)
-               pdata->gpio_wp = -1;
-
        pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
-       if (!pdata->gpio_cd)
-               pdata->gpio_cd = -1;
 
        if (of_get_property(np, "cd-inverted", NULL))
                pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
                return -EINVAL;
        }
 
+       if (!plat) {
+               plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+               if (!plat)
+                       return -ENOMEM;
+       }
+
        if (np)
                mmci_dt_populate_generic_pdata(np, plat);
 
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);
 
+       if (plat->gpio_cd == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_cd;
+       }
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
+       if (plat->gpio_wp == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_wp;
+       }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
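The deferral checks added above follow the usual pattern for DT-described GPIOs: of_get_named_gpio() returns -EPROBE_DEFER while the GPIO provider has not probed yet, and the consumer propagates that so the probe is retried later. A condensed sketch of the pattern (illustrative only, not lifted from this driver):

	int gpio = of_get_named_gpio(np, "cd-gpios", 0);

	if (gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not ready; probe will be retried */

	if (gpio_is_valid(gpio)) {
		ret = gpio_request(gpio, "card detect");
		if (ret)
			return ret;
	}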
index 34a90266ab11710d69f85c2a5648c68d1d6ec430..277161d279b8048600e6b7333d05d9c022562886 100644 (file)
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
                .owner  = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm     = &mxs_mmc_pm_ops,
-               .of_match_table = mxs_mmc_dt_ids,
 #endif
+               .of_match_table = mxs_mmc_dt_ids,
        },
 };
 
index 552196c764d40bf4925e5bec5861729ad6af3716..3e8dcf8d2e051efea306e933fa701d7f92d53333 100644 (file)
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
        .set_ios        = mmc_omap_set_ios,
 };
 
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
 {
        struct mmc_omap_slot *slot = NULL;
        struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
        }
 
        host->nr_slots = pdata->nr_slots;
+       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+       if (!host->mmc_omap_wq)
+               goto err_plat_cleanup;
+
        for (i = 0; i < pdata->nr_slots; i++) {
                ret = mmc_omap_new_slot(host, i);
                if (ret < 0) {
                        while (--i >= 0)
                                mmc_omap_remove_slot(host->slots[i]);
 
-                       goto err_plat_cleanup;
+                       goto err_destroy_wq;
                }
        }
 
-       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
-       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-       if (!host->mmc_omap_wq)
-               goto err_plat_cleanup;
-
        return 0;
 
+err_destroy_wq:
+       destroy_workqueue(host->mmc_omap_wq);
 err_plat_cleanup:
        if (pdata->cleanup)
                pdata->cleanup(&pdev->dev);
index 9a7a60aeb19ea35dc921cd43f2e796c2bfd75469..389a3eedfc24505de5034e7d842f6e5678e4bc25 100644 (file)
@@ -85,7 +85,6 @@
 #define BRR_ENABLE             (1 << 5)
 #define DTO_ENABLE             (1 << 20)
 #define INIT_STREAM            (1 << 1)
-#define ACEN_ACMD12            (1 << 2)
 #define DP_SELECT              (1 << 21)
 #define DDIR                   (1 << 4)
 #define DMA_EN                 0x1
 #define OMAP_MMC_MAX_CLOCK     52000000
 #define DRIVER_NAME            "omap_hsmmc"
 
-#define AUTO_CMD12             (1 << 0)        /* Auto CMD12 support */
 /*
  * One controller can have multiple slots, like on some omap boards using
  * omap.c controller driver. Luckily this is not currently done on any known
@@ -177,7 +175,6 @@ struct omap_hsmmc_host {
        int                     reqs_blocked;
        int                     use_reg;
        int                     req_in_progress;
-       unsigned int            flags;
        struct omap_hsmmc_next  next_data;
 
        struct  omap_mmc_platform_data  *pdata;
@@ -773,8 +770,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
                cmdtype = 0x3;
 
        cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
-       if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
-               cmdreg |= ACEN_ACMD12;
 
        if (data) {
                cmdreg |= DP_SELECT | MSBS | BCE;
@@ -847,14 +842,11 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
        else
                data->bytes_xfered = 0;
 
-       if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
-               omap_hsmmc_start_command(host, data->stop, NULL);
-       } else {
-               if (data->stop)
-                       data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
-                                                       RSP76);
+       if (!data->stop) {
                omap_hsmmc_request_done(host, data->mrq);
+               return;
        }
+       omap_hsmmc_start_command(host, data->stop, NULL);
 }
 
 /*
@@ -1859,7 +1851,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
        host->mapbase   = res->start + pdata->reg_offset;
        host->base      = ioremap(host->mapbase, SZ_4K);
        host->power_mode = MMC_POWER_OFF;
-       host->flags     = AUTO_CMD12;
        host->next_data.cookie = 1;
 
        platform_set_drvdata(pdev, host);
index 55a164fcaa157ece64d9c2be581e5c7b46b71e9d..a50c205ea2085f3266ee906b673d062e3a9aaf10 100644 (file)
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
                if (sc->ext_cd_irq &&
                    request_threaded_irq(sc->ext_cd_irq, NULL,
                                         sdhci_s3c_gpio_card_detect_thread,
-                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                         dev_name(dev), sc) == 0) {
                        int status = gpio_get_value(sc->ext_cd_gpio);
                        if (pdata->ext_cd_gpio_invert)
index 1fe32dfa7cd4913fdb5fde321c2c0fd2da90428b..423da8194cd84e5597c169316311a5ff9629b845 100644 (file)
@@ -4,7 +4,7 @@
  * Support of SDHCI platform devices for spear soc family
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired by sdhci-pltfm.c
  *
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
 module_platform_driver(sdhci_driver);
 
 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_LICENSE("GPL v2");
index e626732aff77d3ebd0563c3d1720da62aab6d7ff..f4b8b4db3a9acd8ccc1c3fdf32c18efc2a8bc88d 100644 (file)
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        if (count >= 0xF) {
-               pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
-                          mmc_hostname(host->mmc), count, cmd->opcode);
+               DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+                   mmc_hostname(host->mmc), count, cmd->opcode);
                count = 0xE;
        }
 
index 5760c1a4b3f66ea3125b71d28ab4f62ceb4ee925..27143e042af5b2dfec767a1d1cf61762fcf2734b 100644 (file)
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
 
 config MTD_OF_PARTS
        tristate "OpenFirmware partitioning information support"
-       default Y
+       default y
        depends on OF
        help
          This provides a partition parsing function which derives
index 608321ee056e5cb69c22f950ad2eb72f974fd6da..63d2a64331f75d3287a68c2f72f8843cddc4f02d 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright © 2006-2008  Florian Fainelli <florian@openwrt.org>
  *                       Mike Albon <malbon@openwrt.org>
  * Copyright © 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2012  Jonas Gorski <jonas.gorski@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
        int namelen = 0;
        int i;
        u32 computed_crc;
+       bool rootfs_first = false;
 
        if (bcm63xx_detect_cfe(master))
                return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                char *boardid = &(buf->board_id[0]);
                char *tagversion = &(buf->tag_version[0]);
 
+               sscanf(buf->flash_image_start, "%u", &rootfsaddr);
                sscanf(buf->kernel_address, "%u", &kerneladdr);
                sscanf(buf->kernel_length, "%u", &kernellen);
                sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                        tagversion, boardid);
 
                kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
-               rootfsaddr = kerneladdr + kernellen;
+               rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
                spareaddr = roundup(totallen, master->erasesize) + cfelen;
                sparelen = master->size - spareaddr - nvramlen;
-               rootfslen = spareaddr - rootfsaddr;
+
+               if (rootfsaddr < kerneladdr) {
+                       /* default Broadcom layout */
+                       rootfslen = kerneladdr - rootfsaddr;
+                       rootfs_first = true;
+               } else {
+                       /* OpenWrt layout */
+                       rootfsaddr = kerneladdr + kernellen;
+                       rootfslen = spareaddr - rootfsaddr;
+               }
        } else {
                pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
                        buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
        curpart++;
 
        if (kernellen > 0) {
-               parts[curpart].name = "kernel";
-               parts[curpart].offset = kerneladdr;
-               parts[curpart].size = kernellen;
+               int kernelpart = curpart;
+
+               if (rootfslen > 0 && rootfs_first)
+                       kernelpart++;
+               parts[kernelpart].name = "kernel";
+               parts[kernelpart].offset = kerneladdr;
+               parts[kernelpart].size = kernellen;
                curpart++;
        }
 
        if (rootfslen > 0) {
-               parts[curpart].name = "rootfs";
-               parts[curpart].offset = rootfsaddr;
-               parts[curpart].size = rootfslen;
-               if (sparelen > 0)
-                       parts[curpart].size += sparelen;
+               int rootfspart = curpart;
+
+               if (kernellen > 0 && rootfs_first)
+                       rootfspart--;
+               parts[rootfspart].name = "rootfs";
+               parts[rootfspart].offset = rootfsaddr;
+               parts[rootfspart].size = rootfslen;
+               if (sparelen > 0  && !rootfs_first)
+                       parts[rootfspart].size += sparelen;
                curpart++;
        }
 
index d02592e6a0f02a89195ef1aea302445d98585b0e..22d0493a026ff7adfe07b62bf7e11555e9f67982 100644 (file)
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
 
        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
-               pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+               pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
 }
 
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
 
        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
-               pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+               pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
 }
 
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+
+       /*
+        * S29NS512P flash uses more than 8 bits to report the number of
+        * sectors, which is not permitted by CFI.
+        */
+       cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+       pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+       { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
index ddf9ec6d9168ed3848fdd249a67c2c2053b43444..4558e0f4d07f89b39a6ab7cce953ef5de701111b 100644 (file)
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
 /* mtdpart_setup() parses into here */
 static struct cmdline_mtd_partition *partitions;
 
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
 static char *cmdline;
 static int cmdline_parsed = 0;
 
index a4a80b742e65e99d602161002dd4296614e8593d..681e2ee0f2d6287a1c0e2170358a5e4cc9b11276 100644 (file)
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 
        while (pages) {
                page = page_read(mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                len = len - cpylen;
 
                page = page_read(dev->blkdev->bd_inode->i_mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
                len = len - cpylen;
 
                page = page_read(mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
        dev->mtd.flags = MTD_CAP_RAM;
        dev->mtd._erase = block2mtd_erase;
        dev->mtd._write = block2mtd_write;
-       dev->mtd._writev = mtd_writev;
        dev->mtd._sync = block2mtd_sync;
        dev->mtd._read = block2mtd_read;
        dev->mtd.priv = dev;
index 50aa90aa7a7fce0706a8ef5af2aa683e84fb346b..f70854d728fe04f3ea7ae2507efd9f458358bc24 100644 (file)
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
        u8 data8, *dst8;
 
        doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
-       cdr = len & 0x3;
+       cdr = len & 0x1;
        len4 = len - cdr;
 
        if (first)
@@ -732,12 +732,24 @@ err:
  * @len: the number of bytes to be read (must be a multiple of 4)
  * @buf: the buffer to be filled in (or NULL is forget bytes)
  * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a trick here: if the last read
+ * ended up at an odd offset in the 1024-byte double page, i.e. between the 2
+ * planes, the first byte must be read separately. If a 16-bit word read were
+ * used, it would return the plane 2 byte as both the low and the high byte,
+ * which would corrupt the read.
  *
  */
 static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
-                                 int first)
+                                 int first, int last_odd)
 {
-       doc_read_data_area(docg3, buf, len, first);
+       if (last_odd && len > 0) {
+               doc_read_data_area(docg3, buf, 1, first);
+               doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+       } else {
+               doc_read_data_area(docg3, buf, len, first);
+       }
        doc_delay(docg3, 2);
        return len;
 }
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
        u8 *buf = ops->datbuf;
        size_t len, ooblen, nbdata, nboob;
        u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+       int max_bitflips = 0;
 
        if (buf)
                len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
        ret = 0;
        skip = from % DOC_LAYOUT_PAGE_SIZE;
        mutex_lock(&docg3->cascade->lock);
-       while (!ret && (len > 0 || ooblen > 0)) {
+       while (ret >= 0 && (len > 0 || ooblen > 0)) {
                calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
                        docg3->reliable);
                nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
                ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
                if (ret < 0)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+               ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
                if (ret < skip)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
+               ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
                if (ret < nbdata)
                        goto err_in_read;
                doc_read_page_getbytes(docg3,
                                       DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
-                                      NULL, 0);
-               ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+                                      NULL, 0, (skip + nbdata) % 2);
+               ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
                if (ret < nboob)
                        goto err_in_read;
                doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
-                                      NULL, 0);
+                                      NULL, 0, nboob % 2);
 
                doc_get_bch_hw_ecc(docg3, hwecc);
                eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
                        }
                        if (ret > 0) {
                                mtd->ecc_stats.corrected += ret;
-                               ret = -EUCLEAN;
+                               max_bitflips = max(max_bitflips, ret);
+                               ret = max_bitflips;
                        }
                }
 
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
                                                     DOC_LAYOUT_PAGE_SIZE);
                if (!ret)
                        doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
-                                              buf, 1);
+                                              buf, 1, 0);
                buf += DOC_LAYOUT_PAGE_SIZE;
        }
        doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
        ret = doc_reset_seq(docg3);
        if (!ret)
                ret = doc_read_page_prepare(docg3, block0, block1, page,
-                                           ofs + DOC_LAYOUT_WEAR_OFFSET);
+                                           ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
        if (!ret)
                ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
-                                            buf, 1);
+                                            buf, 1, 0);
        doc_read_page_finish(docg3);
 
        if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
index 1924d247c1cb924c478ebd644475b799dd044297..5d0d68c3fe27a79a097d64fa03246c276510ad3b 100644 (file)
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
        { "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
        { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
 
+       /* Everspin */
+       { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
+
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
        { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
        { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
 
        /* Macronix */
+       { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
        { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
        { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
        { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
        { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
        { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
        { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+       { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
 
        /* Catalyst / On Semiconductor -- non-JEDEC */
        { "cat25c11", CAT25_INFO(  16, 8, 16, 1) },
index 797d43cd35507372fe7ea4c0205e169dac9fb0a5..67960362681e7f7a72933aaf87b5ffa515c36cfa 100644 (file)
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
                goto err_clk;
        }
 
-       ret = clk_enable(dev->clk);
+       ret = clk_prepare_enable(dev->clk);
        if (ret)
-               goto err_clk_enable;
+               goto err_clk_prepare_enable;
 
        ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
        if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
        free_irq(irq, dev);
        platform_set_drvdata(pdev, NULL);
 err_irq:
-       clk_disable(dev->clk);
-err_clk_enable:
+       clk_disable_unprepare(dev->clk);
+err_clk_prepare_enable:
        clk_put(dev->clk);
 err_clk:
        iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        free_irq(irq, dev);
 
-       clk_disable(dev->clk);
+       clk_disable_unprepare(dev->clk);
        clk_put(dev->clk);
        iounmap(dev->io_base);
        kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
        struct spear_smi *dev = platform_get_drvdata(pdev);
 
        if (dev && dev->clk)
-               clk_disable(dev->clk);
+               clk_disable_unprepare(dev->clk);
 
        return 0;
 }
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
        int ret = -EPERM;
 
        if (dev && dev->clk)
-               ret = clk_enable(dev->clk);
+               ret = clk_prepare_enable(dev->clk);
 
        if (!ret)
                spear_smi_hw_init(dev);
index dbfe17baf0463a7e47325f2b228487e29e28391c..45abed67f1ef176da4469ed5f9b78b1c66e874c4 100644 (file)
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
 
 static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
 {
-       int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+       int qinfo_lines = ARRAY_SIZE(qinfo_array);
        int i;
        int bankwidth = map_bankwidth(map) * 8;
        int major, minor;
index 8af67cfd671acac48ad7885a6e73ebe2f0ecdbff..5ba2458e799ac4e3db5d8d78b7c9b4814bec5a09 100644 (file)
@@ -224,7 +224,7 @@ config MTD_CK804XROM
 
 config MTD_SCB2_FLASH
        tristate "BIOS flash chip on Intel SCB2 boards"
-       depends on X86 && MTD_JEDECPROBE
+       depends on X86 && MTD_JEDECPROBE && PCI
        help
          Support for treating the BIOS flash chip on Intel SCB2 boards
          as an MTD device - with this you can reprogram your BIOS.
index 92e1f41634c7135ffc1b39d50c0323f0de33c603..93f03175c82dce9185f5144e8bc129f849165764 100644 (file)
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
        .id_table = vr_nor_pci_ids,
 };
 
-static int __init vr_nor_mtd_init(void)
-{
-       return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
-       pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
 
 MODULE_AUTHOR("Andy Lowe");
 MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
index 1d005a3e9b41603856c40a60fee6de972771634f..f14ce0af763f0dda66a831470319e23611152eb2 100644 (file)
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
        .id_table =     mtd_pci_ids,
 };
 
-static int __init mtd_pci_maps_init(void)
-{
-       return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
-       pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
index 934a72c8007880407247915d1d814225c45d65f2..9dcbc684abdb27e9e0218a585bdbc1aa079e93af 100644 (file)
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
        .remove =   __devexit_p(scb2_flash_remove),
 };
 
-static int __init
-scb2_flash_init(void)
-{
-       return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
-       pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
index 71b0ba7979121f8cde4480751f547d42d8fa858b..e7534c82f93ab381a86890bfa92d3a6d3027295a 100644 (file)
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
        }
 };
 
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
 
 #define init_sbc82xx_one_flash(map, br, or)                    \
 do {                                                           \
index c837507dfb1c73021da2a47eebd975bd40573ef9..575730744fdb3ce83dc8e4a78cf65cefb3413fdc 100644 (file)
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
 }
 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
 
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+       unsigned int bitflip_threshold;
+       int retval;
+
+       retval = kstrtouint(buf, 0, &bitflip_threshold);
+       if (retval)
+               return retval;
+
+       mtd->bitflip_threshold = bitflip_threshold;
+       return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+                  mtd_bitflip_threshold_show,
+                  mtd_bitflip_threshold_store);
+
 static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
        &dev_attr_oobsize.attr,
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
+       &dev_attr_ecc_strength.attr,
+       &dev_attr_bitflip_threshold.attr,
        NULL,
 };
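
Once registered, the new ecc_strength and bitflip_threshold attributes appear in the MTD device's sysfs directory (typically /sys/class/mtd/mtdX/). A small userspace sketch, assuming an mtd0 device exists, that reads the reported ECC strength:

#include <stdio.h>

int main(void)
{
	unsigned int ecc_strength;
	/* Path assumes the first MTD device; adjust mtd0 as needed. */
	FILE *f = fopen("/sys/class/mtd/mtd0/ecc_strength", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &ecc_strength) == 1)
		printf("ecc_strength = %u\n", ecc_strength);
	fclose(f);
	return 0;
}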
 
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
        mtd->index = i;
        mtd->usecount = 0;
 
+       /* default value if not set by driver */
+       if (mtd->bitflip_threshold == 0)
+               mtd->bitflip_threshold = mtd->ecc_strength;
+
        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
             u_char *buf)
 {
+       int ret_code;
        *retlen = 0;
        if (from < 0 || from > mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;
-       return mtd->_read(mtd, from, len, retlen, buf);
+
+       /*
+        * In the absence of an error, drivers return a non-negative integer
+        * representing the maximum number of bitflips that were corrected on
+        * any one ecc region (if applicable; zero otherwise).
+        */
+       ret_code = mtd->_read(mtd, from, len, retlen, buf);
+       if (unlikely(ret_code < 0))
+               return ret_code;
+       if (mtd->ecc_strength == 0)
+               return 0;       /* device lacks ecc */
+       return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 }
 EXPORT_SYMBOL_GPL(mtd_read);
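
With this change a driver's _read() hook reports the worst-case number of bitflips corrected in any single ECC region, and mtd_read() translates that count into -EUCLEAN only once it reaches mtd->bitflip_threshold. A hedged caller-side sketch (hypothetical wrapper, not part of this commit) of how the result is meant to be consumed:

#include <linux/mtd/mtd.h>

static int example_read_and_check(struct mtd_info *mtd, loff_t ofs,
				  size_t len, u_char *buf)
{
	size_t retlen;
	int ret = mtd_read(mtd, ofs, len, &retlen, buf);

	if (mtd_is_bitflip(ret))	/* -EUCLEAN: data corrected, block is wearing out */
		return 0;		/* caller may queue the block for scrubbing */
	if (mtd_is_eccerr(ret))		/* -EBADMSG: uncorrectable, data is suspect */
		return ret;
	return ret;			/* 0 on a clean read, or another error code */
}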
 
index ae36d7e1e91368dd36239a57f73ef4ccda637f56..551e316e4454d99f9b552a2924bea045784c7a8c 100644 (file)
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
 }
 
 static void mtdoops_do_dump(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
-               const char *s2, unsigned long l2)
+                           enum kmsg_dump_reason reason)
 {
        struct mtdoops_context *cxt = container_of(dumper,
                        struct mtdoops_context, dump);
-       unsigned long s1_start, s2_start;
-       unsigned long l1_cpy, l2_cpy;
-       char *dst;
-
-       if (reason != KMSG_DUMP_OOPS &&
-           reason != KMSG_DUMP_PANIC)
-               return;
 
        /* Only dump oopses if dump_oops is set */
        if (reason == KMSG_DUMP_OOPS && !dump_oops)
                return;
 
-       dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
-       l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
-       l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
-       s2_start = l2 - l2_cpy;
-       s1_start = l1 - l1_cpy;
-
-       memcpy(dst, s1 + s1_start, l1_cpy);
-       memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+       kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+                            record_size - MTDOOPS_HEADER_SIZE, NULL);
 
        /* Panics must be written immediately */
        if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
                return;
        }
 
+       cxt->dump.max_reason = KMSG_DUMP_OOPS;
        cxt->dump.dump = mtdoops_do_dump;
        err = kmsg_dump_register(&cxt->dump);
        if (err) {
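
The mtdoops rework above targets the newer kmsg_dump interface: the dumper declares a max_reason filter and pulls the log text itself via kmsg_dump_get_buffer() instead of being handed two string fragments. A minimal registration sketch with hypothetical example_* names:

#include <linux/kmsg_dump.h>

static char example_buf[4096];			/* hypothetical record buffer */

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	/* Copy as much of the tail of the kernel log as fits into the buffer. */
	kmsg_dump_get_buffer(dumper, true, example_buf, sizeof(example_buf), &len);

	/* ...hand example_buf/len to the storage back end here... */
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_do_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* reason filtering is done by the core */
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}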
index 9651c06de0a9298f4db58265524ecaebe2c2b9b4..d518e4db8a0bf8665156fd2383ad40fcde0fe146 100644 (file)
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
        stats = part->master->ecc_stats;
        res = part->master->_read(part->master, from + part->offset, len,
                                  retlen, buf);
-       if (unlikely(res)) {
-               if (mtd_is_bitflip(res))
-                       mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
-               if (mtd_is_eccerr(res))
-                       mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
-       }
+       if (unlikely(mtd_is_eccerr(res)))
+               mtd->ecc_stats.failed +=
+                       part->master->ecc_stats.failed - stats.failed;
+       else
+               mtd->ecc_stats.corrected +=
+                       part->master->ecc_stats.corrected - stats.corrected;
        return res;
 }
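
The mtd_is_eccerr() test here (and mtd_is_bitflip() used by callers of mtd_read()) are, as far as I know, thin comparisons against the two error codes involved; a sketch of their expected shape, using example_* names — the authoritative helpers live in include/linux/mtd/mtd.h:

static inline int example_is_bitflip(int err)
{
	return err == -EUCLEAN;		/* corrected bitflips reached the threshold */
}

static inline int example_is_eccerr(int err)
{
	return err == -EBADMSG;		/* uncorrectable ECC error */
}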
 
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 
        slave->mtd.ecclayout = master->ecclayout;
        slave->mtd.ecc_strength = master->ecc_strength;
+       slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
        if (master->_block_isbad) {
                uint64_t offs = 0;
 
index 7d17cecad69d8fccc1467eaa210c6fe2915197dd..31bb7e5b504aa8f05e31472787d2dac7aff9659f 100644 (file)
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
           Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
          platforms.
 
+config MTD_NAND_OMAP_BCH
+       depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+       bool "Enable support for hardware BCH error correction"
+       default n
+       select BCH
+       select BCH_CONST_PARAMS
+       help
+        Support for hardware BCH error correction.
+
+choice
+       prompt "BCH error correction capability"
+       depends on MTD_NAND_OMAP_BCH
+
+config MTD_NAND_OMAP_BCH8
+       bool "8 bits / 512 bytes (recommended)"
+       help
+        Support correcting up to 8 bitflips per 512-byte block.
+        This will use 13 bytes of spare area per 512 bytes of page data.
+        This is the recommended mode, as 4-bit mode does not work
+        on some OMAP3 revisions, due to a hardware bug.
+
+config MTD_NAND_OMAP_BCH4
+       bool "4 bits / 512 bytes"
+       help
+        Support correcting up to 4 bitflips per 512-byte block.
+        This will use 7 bytes of spare area per 512 bytes of page data.
+        Note that this mode does not work on some OMAP3 revisions, due to a
+        hardware bug. Please check your OMAP datasheet before selecting this
+        mode.
+
+endchoice
+
+if MTD_NAND_OMAP_BCH
+config BCH_CONST_M
+       default 13
+config BCH_CONST_T
+       default 4 if MTD_NAND_OMAP_BCH4
+       default 8 if MTD_NAND_OMAP_BCH8
+endif
+
 config MTD_NAND_IDS
        tristate
 
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
 
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
-        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
         help
         Enables NAND Flash support for IMX23 or IMX28.
         The GPMI controller is very powerful, with the help of BCH
index 4f20e1d8bef10b43546bd64d51933c0ed010093d..60a0dfdb08087a55311ee7f2e53522ecde7baa17 100644 (file)
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
        }
        err = 0;
        if (corrected)
-               err = -EUCLEAN;
+               err = 1;        /* return max_bitflips per ecc step */
        if (uncorrected)
                err = -EBADMSG;
 out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
        }
        err = 0;
        if (corrected)
-               err = -EUCLEAN;
+               err = 1;        /* return max_bitflips per ecc step */
        if (uncorrected)
                err = -EBADMSG;
        return err;
index 2165576a1c67df0e623752970fdfdd141f3ea931..97ac6712bb1926c6ff8e31292cc3c362d4d53d9d 100644 (file)
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
  * mtd:        mtd info structure
  * chip:       nand chip info structure
  * buf:        buffer to store read data
+ * oob_required:    caller expects OOB data read to chip->oob_poi
  */
-static int atmel_nand_read_page(struct mtd_info *mtd,
-               struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               uint8_t *buf, int oob_required, int page)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        uint8_t *oob = chip->oob_poi;
        uint8_t *ecc_pos;
        int stat;
+       unsigned int max_bitflips = 0;
 
        /*
         * Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        /* check if there's an error */
        stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-       if (stat < 0)
+       if (stat < 0) {
                mtd->ecc_stats.failed++;
-       else
+       } else {
                mtd->ecc_stats.corrected += stat;
+               max_bitflips = max_t(unsigned int, max_bitflips, stat);
+       }
 
        /* get back to oob start (end of page) */
        chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        /* read the oob */
        chip->read_buf(mtd, oob, mtd->oobsize);
 
-       return 0;
+       return max_bitflips;
 }
 
 /*
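
The same conversion recurs in most of the read_page implementations that follow: rather than returning 0 unconditionally, the driver tracks the worst correction count across ECC steps and returns it, which is what the reworked mtd_read() compares against bitflip_threshold. A generic sketch of the loop, with a hypothetical per-step correction helper:

static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips = 0;
	int step, stat;

	for (step = 0; step < chip->ecc.steps; step++) {
		stat = example_correct_step(mtd, buf, step);	/* hypothetical */
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;	/* worst single ECC step, not the running total */
}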
index 73abbc3e093eced219347f93f9fefc5b31c2bd15..9f609d2dcf62d3dc993358e4de817de00d154267 100644 (file)
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
        this->chip_delay = 30;
        this->ecc.mode = NAND_ECC_SOFT;
 
-       this->options = NAND_NO_AUTOINCR;
-
        if (pd->devwidth)
                this->options |= NAND_BUSWIDTH_16;
 
index a930666d0687655e1f5fe27c1604c34e38fe6911..5914bb32e0014e189cd42aafd34017698222dd27 100644 (file)
@@ -22,9 +22,9 @@
 
 /* ---- Private Function Prototypes -------------------------------------- */
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, uint8_t *buf, int page);
+       struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, const uint8_t *buf);
+       struct nand_chip *chip, const uint8_t *buf, int oob_required);
 
 /* ---- Private Variables ------------------------------------------------ */
 
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
 *  @mtd:       mtd info structure
 *  @chip:      nand chip info structure
 *  @buf:       buffer to store read data
+*  @oob_required:      caller expects OOB data read to chip->oob_poi
 *
 ***************************************************************************/
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
                                       struct nand_chip *chip, uint8_t * buf,
-                                                int page)
+                                      int oob_required, int page)
 {
        int sectorIdx = 0;
        int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
        uint8_t eccCalc[NAND_ECC_NUM_BYTES];
        int sectorOobSize = mtd->oobsize / eccsteps;
        int stat;
+       unsigned int max_bitflips = 0;
 
        for (sectorIdx = 0; sectorIdx < eccsteps;
                        sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
                        }
 #endif
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
 *  @mtd:       mtd info structure
 *  @chip:      nand chip info structure
 *  @buf:       data buffer
+*  @oob_required:      must write chip->oob_poi to OOB
 *
 ***************************************************************************/
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, const uint8_t *buf)
+       struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
        int sectorIdx = 0;
        int eccsize = chip->ecc.size;
index 6908cdde3065e73b24509e8c9d32865f5befb1d9..c855e7cd337b2f7a278a164c2e7a7b7f17a723f8 100644 (file)
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
         * for MLC parts which may have permanently stuck bits.
         */
        struct nand_chip *chip = mtd->priv;
-       int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+       int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
        if (ret < 0)
                return -EFAULT;
        else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
                this->badblock_pattern = &largepage_bbt;
        }
 
-       /*
-        * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
-        * conservative guess, given 13 ecc bytes and using bch alg.
-        * (Assume Galois field order m=15 to allow a margin of error.)
-        */
-       this->ecc.strength = 6;
+       this->ecc.strength = 8;
 
 #endif
 
index d7b86b925de5ead4b4c0653bf13080bea55a4156..3f1c18599cbd9484096caed856f69c79c61c2bff 100644 (file)
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
 }
 
 static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-               uint8_t *buf, int page)
+               uint8_t *buf, int oob_required, int page)
 {
        bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
        bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
 }
 
 static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-               const uint8_t *buf)
+               const uint8_t *buf, int oob_required)
 {
        bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
        bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
index 2a96e1a12062314234f2cd74b86163029abc3e3d..f3f6cfedd69eb5e1367c196de7e8e249740bb77b 100644 (file)
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 static int cafe_device_ready(struct mtd_info *mtd)
 {
        struct cafe_priv *cafe = mtd->priv;
-       int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+       int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
        uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
 
        cafe_writel(cafe, irqs, NAND_IRQ);
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
 
 /* Don't use -- use nand_read_oob_std for now */
 static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                             int page, int sndcmd)
+                             int page)
 {
        chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return 1;
+       return 0;
 }
 /**
  * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
  * @mtd:       mtd info structure
  * @chip:      nand chip info structure
  * @buf:       buffer to store read data
+ * @oob_required:      caller expects OOB data read to chip->oob_poi
  *
  * The hw generator calculates the error syndrome automatically. Therefore
  * we need a special oob layout and handling.
  */
 static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                              uint8_t *buf, int page)
+                              uint8_t *buf, int oob_required, int page)
 {
        struct cafe_priv *cafe = mtd->priv;
+       unsigned int max_bitflips = 0;
 
        cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
                     cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                } else {
                        dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
                        mtd->ecc_stats.corrected += n;
+                       max_bitflips = max_t(unsigned int, max_bitflips, n);
                }
        }
 
-       return 0;
+       return max_bitflips;
 }
 
 static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
 
 
 static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
-                                         struct nand_chip *chip, const uint8_t *buf)
+                                         struct nand_chip *chip,
+                                         const uint8_t *buf, int oob_required)
 {
        struct cafe_priv *cafe = mtd->priv;
 
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
 }
 
 static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf, int page, int cached, int raw)
+                               const uint8_t *buf, int oob_required, int page,
+                               int cached, int raw)
 {
        int status;
 
        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
        if (unlikely(raw))
-               chip->ecc.write_page_raw(mtd, chip, buf);
+               chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
        else
-               chip->ecc.write_page(mtd, chip, buf);
+               chip->ecc.write_page(mtd, chip, buf, oob_required);
 
        /*
         * Cached programming disabled for now; not sure if it's worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 
        /* Enable the following for a flash based bad block table */
        cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
-       cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+       cafe->nand.options = NAND_OWN_BUFFERS;
 
        if (skipbbt) {
                cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
        .resume = cafe_nand_resume,
 };
 
-static int __init cafe_nand_init(void)
-{
-       return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
-       pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
index 821c34c6250021246dfdb7ef97a44e497a05726c..adb6c3ef37fb0d8a645177f1c8efaf6b098bde8e 100644 (file)
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
 
        /* Enable the following for a flash based bad block table */
        this->bbt_options = NAND_BBT_USE_FLASH;
-       this->options = NAND_NO_AUTOINCR;
 
        /* Scan to find existence of the device */
        if (nand_scan(new_mtd, 1)) {
index a9e57d686297096e0700935a90d6a5257aa789ff..0650aafa0dd2238b2af08a1328970e5efb38d24e 100644 (file)
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
 #define ECC_LAST_ERR(x)                ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
 
 static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
-                                       uint32_t irq_status)
+                      uint32_t irq_status, unsigned int *max_bitflips)
 {
        bool check_erased_page = false;
+       unsigned int bitflips = 0;
 
        if (irq_status & INTR_STATUS__ECC_ERR) {
                /* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
                                        /* correct the ECC error */
                                        buf[offset] ^= err_correction_value;
                                        denali->mtd.ecc_stats.corrected++;
+                                       bitflips++;
                                }
                        } else {
                                /* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
                clear_interrupts(denali);
                denali_set_intr_modes(denali, true);
        }
+       *max_bitflips = bitflips;
        return check_erased_page;
 }
 
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
  * by write_page above.
  * */
 static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf)
+                               const uint8_t *buf, int oob_required)
 {
        /* for regular page writes, we let HW handle all the ECC
         * data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  * write_page() function above.
  */
 static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                                       const uint8_t *buf)
+                                       const uint8_t *buf, int oob_required)
 {
        /* for raw page writes, we want to disable ECC and simply write
           whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
 }
 
 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                          int page, int sndcmd)
+                          int page)
 {
        read_oob_data(mtd, chip->oob_poi, page);
 
-       return 0; /* notify NAND core to send command to
-                          NAND device. */
+       return 0;
 }
 
 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                           uint8_t *buf, int page)
+                           uint8_t *buf, int oob_required, int page)
 {
+       unsigned int max_bitflips;
        struct denali_nand_info *denali = mtd_to_denali(mtd);
 
        dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 
        memcpy(buf, denali->buf.buf, mtd->writesize);
 
-       check_erased_page = handle_ecc(denali, buf, irq_status);
+       check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
        denali_enable_dma(denali, false);
 
        if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                                denali->mtd.ecc_stats.failed++;
                }
        }
-       return 0;
+       return max_bitflips;
 }
 
 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
 
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
        .remove = denali_pci_remove,
 };
 
-static int __devinit denali_init(void)
-{
-       printk(KERN_INFO "Spectra MTD driver\n");
-       return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
-       pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+module_pci_driver(denali_pci_driver);
index b08202664543200553255f89c00669e203783d14..a225e49a56235763b35cfd5118f30f7c20c4fb55 100644 (file)
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
        struct docg4_priv *doc = nand->priv;
        void __iomem *docptr = doc->virtadr;
        uint16_t status, edc_err, *buf16;
+       int bits_corrected = 0;
 
        dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
 
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
 
                /* If bitflips are reported, attempt to correct with ecc */
                if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
-                       int bits_corrected = correct_data(mtd, buf, page);
+                       bits_corrected = correct_data(mtd, buf, page);
                        if (bits_corrected == -EBADMSG)
                                mtd->ecc_stats.failed++;
                        else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
        }
 
        writew(0, docptr + DOC_DATAEND);
-       return 0;
+       return bits_corrected;
 }
 
 
 static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-                              uint8_t *buf, int page)
+                              uint8_t *buf, int oob_required, int page)
 {
        return read_page(mtd, nand, buf, page, false);
 }
 
 static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
-                          uint8_t *buf, int page)
+                          uint8_t *buf, int oob_required, int page)
 {
        return read_page(mtd, nand, buf, page, true);
 }
 
 static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
-                         int page, int sndcmd)
+                         int page)
 {
        struct docg4_priv *doc = nand->priv;
        void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
 }
 
 static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-                                const uint8_t *buf)
+                                const uint8_t *buf, int oob_required)
 {
        return write_page(mtd, nand, buf, false);
 }
 
 static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
-                            const uint8_t *buf)
+                            const uint8_t *buf, int oob_required)
 {
        return write_page(mtd, nand, buf, true);
 }
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
                return -ENOMEM;
 
        read_page_prologue(mtd, g4_addr);
-       status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+       status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
        if (status)
                goto exit;
 
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
 
        /* write first page of block */
        write_page_prologue(mtd, g4_addr);
-       docg4_write_page(mtd, nand, buf);
+       docg4_write_page(mtd, nand, buf, 1);
        ret = pageprog(mtd);
        if (!ret)
                mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
        nand->ecc.prepad = 8;
        nand->ecc.bytes = 8;
        nand->ecc.strength = DOCG4_T;
-       nand->options =
-               NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+       nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
        nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
        nand->controller = &nand->hwcontrol;
        spin_lock_init(&nand->controller->lock);
index 80b5264f0a32f031a10f5b50e50987a1184646e1..784293806110acc63ee4ae062c689dab534860d6 100644 (file)
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
        unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
        unsigned int oob;        /* Non zero if operating on OOB data     */
        unsigned int counter;    /* counter for the initializations       */
+       unsigned int max_bitflips;  /* Saved during READ0 cmd             */
 };
 
 /* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
        if (chip->ecc.mode != NAND_ECC_HW)
                return 0;
 
+       elbc_fcm_ctrl->max_bitflips = 0;
+
        if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
                uint32_t lteccr = in_be32(&lbc->lteccr);
                /*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
                 * bits 28-31 are uncorrectable errors, marked elsewhere.
                 * for small page nand only 1 bit is used.
                 * if the ELBC doesn't have the lteccr register it reads 0
+                * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+                * count the number of sub-pages with bitflips and update
+                * ecc_stats.corrected accordingly.
                 */
                if (lteccr & 0x000F000F)
                        out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
-               if (lteccr & 0x000F0000)
+               if (lteccr & 0x000F0000) {
                        mtd->ecc_stats.corrected++;
+                       elbc_fcm_ctrl->max_bitflips = 1;
+               }
        }
 
        return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
        return 0;
 }
 
-static int fsl_elbc_read_page(struct mtd_info *mtd,
-                              struct nand_chip *chip,
-                             uint8_t *buf,
-                             int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                             uint8_t *buf, int oob_required, int page)
 {
+       struct fsl_elbc_mtd *priv = chip->priv;
+       struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+       struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
        fsl_elbc_read_buf(mtd, buf, mtd->writesize);
-       fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
        if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
                mtd->ecc_stats.failed++;
 
-       return 0;
+       return elbc_fcm_ctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_elbc_write_page(struct mtd_info *mtd,
-                                struct nand_chip *chip,
-                                const uint8_t *buf)
+static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               const uint8_t *buf, int oob_required)
 {
        fsl_elbc_write_buf(mtd, buf, mtd->writesize);
        fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
        chip->bbt_md = &bbt_mirror_descr;
 
        /* set up nand options */
-       chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+       chip->options = NAND_NO_READRDY;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
        chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
                chip->ecc.size = 512;
                chip->ecc.bytes = 3;
                chip->ecc.strength = 1;
-               /*
-                * FIXME: can hardware ecc correct 4 bitflips if page size is
-                * 2k?  Then does hardware report number of corrections for this
-                * case?  If so, ecc_stats reporting needs to be fixed as well.
-                */
        } else {
                /* otherwise fall back to default software ECC */
                chip->ecc.mode = NAND_ECC_SOFT;
index c30ac7b83d284cf2da970ab755441b7ba40de0dd..9602c1b7e27e8e5a70c66f50a47df04693b4bf1e 100644 (file)
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
        unsigned int oob;       /* Non zero if operating on OOB data    */
        unsigned int eccread;   /* Non zero for a full-page ECC read    */
        unsigned int counter;   /* counter for the initializations      */
+       unsigned int max_bitflips;  /* Saved during READ0 cmd           */
 };
 
 static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
        if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
                dev_err(priv->dev, "NAND Flash Write Protect Error\n");
 
+       nctrl->max_bitflips = 0;
+
        if (nctrl->eccread) {
                int errors;
                int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
                        }
 
                        mtd->ecc_stats.corrected += errors;
+                       nctrl->max_bitflips = max_t(unsigned int,
+                                                   nctrl->max_bitflips,
+                                                   errors);
                }
 
                nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
 
                return;
 
-       /* READID must read all 8 possible bytes */
        case NAND_CMD_READID:
+       case NAND_CMD_PARAM: {
+               int timing = IFC_FIR_OP_RB;
+               if (command == NAND_CMD_PARAM)
+                       timing = IFC_FIR_OP_RBCD;
+
                out_be32(&ifc->ifc_nand.nand_fir0,
                                (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
                                (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-                               (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+                               (timing << IFC_NAND_FIR0_OP2_SHIFT));
                out_be32(&ifc->ifc_nand.nand_fcr0,
-                               NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
-               /* 8 bytes for manuf, device and exts */
-               out_be32(&ifc->ifc_nand.nand_fbcr, 8);
-               ifc_nand_ctrl->read_bytes = 8;
+                               command << IFC_NAND_FCR0_CMD0_SHIFT);
+               out_be32(&ifc->ifc_nand.row3, column);
+
+               /*
+                * although currently it's 8 bytes for READID, we always read
+                * the maximum 256 bytes(for PARAM)
+                */
+               out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+               ifc_nand_ctrl->read_bytes = 256;
 
                set_addr(mtd, 0, 0, 0);
                fsl_ifc_run_command(mtd);
                return;
+       }
 
        /* ERASE1 stores the block and page address */
        case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
        return nand_fsr | NAND_STATUS_WP;
 }
 
-static int fsl_ifc_read_page(struct mtd_info *mtd,
-                             struct nand_chip *chip,
-                             uint8_t *buf, int page)
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                            uint8_t *buf, int oob_required, int page)
 {
        struct fsl_ifc_mtd *priv = chip->priv;
        struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+       struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
 
        fsl_ifc_read_buf(mtd, buf, mtd->writesize);
-       fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
        if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
                dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
        if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
                mtd->ecc_stats.failed++;
 
-       return 0;
+       return nctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_ifc_write_page(struct mtd_info *mtd,
-                               struct nand_chip *chip,
-                               const uint8_t *buf)
+static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                              const uint8_t *buf, int oob_required)
 {
        fsl_ifc_write_buf(mtd, buf, mtd->writesize);
        fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        out_be32(&ifc->ifc_nand.ncfgr, 0x0);
 
        /* set up nand options */
-       chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+       chip->options = NAND_NO_READRDY;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
 
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        /* Hardware generates ECC per 512 Bytes */
        chip->ecc.size = 512;
        chip->ecc.bytes = 8;
+       chip->ecc.strength = 4;
 
        switch (csor & CSOR_NAND_PGS_MASK) {
        case CSOR_NAND_PGS_512:
index 1b8330e1155a4468f4fd272a052b9b095850eddb..38d26240d8b152b06462794ed688905951bf7f4b 100644 (file)
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
  * @mtd:       mtd info structure
  * @chip:      nand chip info structure
  * @buf:       buffer to store read data
+ * @oob_required:      caller expects OOB data read to chip->oob_poi
  * @page:      page number to read
  *
  * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
  * max of 8 bits)
  */
 static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                uint8_t *buf, int page)
+                                uint8_t *buf, int oob_required, int page)
 {
        struct fsmc_nand_data *host = container_of(mtd,
                                        struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
         */
        uint16_t ecc_oob[7];
        uint8_t *oob = (uint8_t *)&ecc_oob[0];
+       unsigned int max_bitflips = 0;
 
        for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
                chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
 
-       return 0;
+       return max_bitflips;
 }
 
 /*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
                return PTR_ERR(host->clk);
        }
 
-       ret = clk_enable(host->clk);
+       ret = clk_prepare_enable(host->clk);
        if (ret)
-               goto err_clk_enable;
+               goto err_clk_prepare_enable;
 
        /*
         * This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
        if (host->mode == USE_DMA_ACCESS)
                dma_release_channel(host->read_dma_chan);
 err_req_read_chnl:
-       clk_disable(host->clk);
-err_clk_enable:
+       clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
        clk_put(host->clk);
        return ret;
 }
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
                        dma_release_channel(host->write_dma_chan);
                        dma_release_channel(host->read_dma_chan);
                }
-               clk_disable(host->clk);
+               clk_disable_unprepare(host->clk);
                clk_put(host->clk);
        }
 
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
 {
        struct fsmc_nand_data *host = dev_get_drvdata(dev);
        if (host)
-               clk_disable(host->clk);
+               clk_disable_unprepare(host->clk);
        return 0;
 }
 
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
 {
        struct fsmc_nand_data *host = dev_get_drvdata(dev);
        if (host) {
-               clk_enable(host->clk);
+               clk_prepare_enable(host->clk);
                fsmc_nand_setup(host->regs_va, host->bank,
                                host->nand.options & NAND_BUSWIDTH_16,
                                host->dev_timings);
index 4effb8c579db0d5b3e9127dc3cedbe7630a532c3..a0924515c39644fcc9430938ded2795a2f356a4c 100644 (file)
 
 #define BP_BCH_FLASH0LAYOUT0_ECC0              12
 #define BM_BCH_FLASH0LAYOUT0_ECC0      (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v)           \
-       (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0         11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)                                \
+       (GPMI_IS_MX6Q(x)                                        \
+               ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)      \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)       \
+               : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)           \
+                       & BM_BCH_FLASH0LAYOUT0_ECC0)            \
+       )
 
 #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE                0
 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v)     \
-       (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
-                                        & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE   \
+                       (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)                          \
+       (GPMI_IS_MX6Q(x)                                                \
+               ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)   \
+               : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)               \
+       )
 
 #define HW_BCH_FLASH0LAYOUT1                   0x00000090
 
 
 #define BP_BCH_FLASH0LAYOUT1_ECCN              12
 #define BM_BCH_FLASH0LAYOUT1_ECCN      (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v)           \
-       (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN         11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)                                \
+       (GPMI_IS_MX6Q(x)                                        \
+               ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)      \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)       \
+               : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)           \
+                       & BM_BCH_FLASH0LAYOUT1_ECCN)            \
+       )
 
 #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE                0
 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v)     \
-       (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
-                                        & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE   \
+                       (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)                          \
+       (GPMI_IS_MX6Q(x)                                                \
+               ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)   \
+               : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)               \
+       )
 #endif
index e8ea7107932e9a9f784007da5be11cf01e32d5dd..a1f43329ad43d2c7898f7c978cb3f9f15721fe5e 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
-#include <mach/mxs.h>
 
 #include "gpmi-nand.h"
 #include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
        .max_dll_delay_in_ns         = 16,
 };
 
+#define MXS_SET_ADDR           0x4
+#define MXS_CLR_ADDR           0x8
 /*
  * Clear the bit and poll it cleared.  This is usually called with
  * a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
        int timeout = 0x400;
 
        /* clear the bit */
-       __mxs_clrl(mask, addr);
+       writel(mask, addr + MXS_CLR_ADDR);
 
        /*
         * SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
                goto error;
 
        /* clear CLKGATE */
-       __mxs_clrl(MODULE_CLKGATE, reset_addr);
+       writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
 
        if (!just_enable) {
                /* set SFTRST to reset the block */
-               __mxs_setl(MODULE_SFTRST, reset_addr);
+               writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
                udelay(1);
 
                /* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        /* Configure layout 0. */
        writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
                        | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
-                       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
-                       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+                       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT0);
 
        writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
-                       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
-                       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+                       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT1);
 
        /* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
        return max(k, min);
 }
 
+#define DEF_MIN_PROP_DELAY     5
+#define DEF_MAX_PROP_DELAY     9
 /* Apply timing to current hardware conditions. */
 static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
                                        struct gpmi_nfc_hardware_timing *hw)
 {
-       struct gpmi_nand_platform_data *pdata = this->pdata;
        struct timing_threshod *nfc = &timing_default_threshold;
        struct nand_chip *nand = &this->nand;
        struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
        int ideal_sample_delay_in_ns;
        unsigned int sample_delay_factor;
        int tEYE;
-       unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
-       unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+       unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+       unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
 
        /*
         * If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
        if (GPMI_IS_MX23(this)) {
                mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
                reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
-       } else if (GPMI_IS_MX28(this)) {
+       } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+               /* MX28 shares the same R/B register as MX6Q. */
                mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
                reg = readl(r->gpmi_regs + HW_GPMI_STAT);
        } else
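
The MXS_SET_ADDR/MXS_CLR_ADDR writes above exploit the i.MX23/28-style register banks, where each control register has write-only aliases at +0x4 (set bits) and +0x8 (clear bits), so no read-modify-write is needed; that is what the removed __mxs_setl()/__mxs_clrl() helpers wrapped. A small sketch, assuming a memory-mapped control register:

#include <linux/io.h>

#define EXAMPLE_SET_OFFSET	0x4	/* write-1-to-set alias */
#define EXAMPLE_CLR_OFFSET	0x8	/* write-1-to-clear alias */

static void example_modify_bits(void __iomem *reg, u32 mask, bool set)
{
	if (set)
		writel(mask, reg + EXAMPLE_SET_OFFSET);	/* set the bits in mask */
	else
		writel(mask, reg + EXAMPLE_CLR_OFFSET);	/* clear the bits in mask */
}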
index b68e04310bd8e5b14327ff142788180ba8680754..a6cad5caba788fe8b665270a3fc7494d7a071887 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
 static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
 {
        struct gpmi_nand_data *this = param;
-       struct resource *r = this->private;
+       int dma_channel = (int)this->private;
 
        if (!mxs_dma_is_apbh(chan))
                return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
         *      for mx28 :      MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
         *              (These eight channels share the same IRQ!)
         */
-       if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+       if (dma_channel == chan->chan_id) {
                chan->private = &this->dma_data;
                return true;
        }
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
 static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
 {
        struct platform_device *pdev = this->pdev;
-       struct gpmi_nand_platform_data *pdata = this->pdata;
-       struct resources *res = &this->resources;
-       struct resource *r, *r_dma;
-       unsigned int i;
+       struct resource *r_dma;
+       struct device_node *dn;
+       int dma_channel;
+       unsigned int ret;
+       struct dma_chan *dma_chan;
+       dma_cap_mask_t mask;
+
+       /* dma channel, we only use the first one. */
+       dn = pdev->dev.of_node;
+       ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
+       if (ret) {
+               pr_err("unable to get DMA channel from dt.\n");
+               goto acquire_err;
+       }
+       this->private = (void *)dma_channel;
 
-       r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
-                                       GPMI_NAND_DMA_CHANNELS_RES_NAME);
+       /* gpmi dma interrupt */
        r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
                                        GPMI_NAND_DMA_INTERRUPT_RES_NAME);
-       if (!r || !r_dma) {
+       if (!r_dma) {
                pr_err("Can't get resource for DMA\n");
-               return -ENXIO;
+               goto acquire_err;
        }
+       this->dma_data.chan_irq = r_dma->start;
 
-       /* used in gpmi_dma_filter() */
-       this->private = r;
+       /* request dma channel */
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
 
-       for (i = r->start; i <= r->end; i++) {
-               struct dma_chan *dma_chan;
-               dma_cap_mask_t mask;
-
-               if (i - r->start >= pdata->max_chip_count)
-                       break;
-
-               dma_cap_zero(mask);
-               dma_cap_set(DMA_SLAVE, mask);
-
-               /* get the DMA interrupt */
-               if (r_dma->start == r_dma->end) {
-                       /* only register the first. */
-                       if (i == r->start)
-                               this->dma_data.chan_irq = r_dma->start;
-                       else
-                               this->dma_data.chan_irq = NO_IRQ;
-               } else
-                       this->dma_data.chan_irq = r_dma->start + (i - r->start);
-
-               dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
-               if (!dma_chan)
-                       goto acquire_err;
-
-               /* fill the first empty item */
-               this->dma_chans[i - r->start] = dma_chan;
+       dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+       if (!dma_chan) {
+               pr_err("dma_request_channel failed.\n");
+               goto acquire_err;
        }
 
-       res->dma_low_channel = r->start;
-       res->dma_high_channel = i;
+       this->dma_chans[0] = dma_chan;
        return 0;
 
 acquire_err:
-       pr_err("Can't acquire DMA channel %u\n", i);
        release_dma_channels(this);
        return -EINVAL;
 }
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
 }
 
 static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        struct gpmi_nand_data *this = chip->priv;
        struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,17 +907,20 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                mtd->ecc_stats.corrected += corrected;
        }
 
-       /*
-        * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
-        * details about our policy for delivering the OOB.
-        *
-        * We fill the caller's buffer with set bits, and then copy the block
-        * mark to th caller's buffer. Note that, if block mark swapping was
-        * necessary, it has already been done, so we can rely on the first
-        * byte of the auxiliary buffer to contain the block mark.
-        */
-       memset(chip->oob_poi, ~0, mtd->oobsize);
-       chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+       if (oob_required) {
+               /*
+                * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+                * for details about our policy for delivering the OOB.
+                *
+                * We fill the caller's buffer with set bits, and then copy the
+                * block mark to th caller's buffer. Note that, if block mark
+                * swapping was necessary, it has already been done, so we can
+                * rely on the first byte of the auxiliary buffer to contain
+                * the block mark.
+                */
+               memset(chip->oob_poi, ~0, mtd->oobsize);
+               chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+       }
 
        read_page_swap_end(this, buf, mtd->writesize,
                        this->payload_virt, this->payload_phys,
@@ -937,8 +930,8 @@ exit_nfc:
        return ret;
 }
 
-static void gpmi_ecc_write_page(struct mtd_info *mtd,
-                               struct nand_chip *chip, const uint8_t *buf)
+static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               const uint8_t *buf, int oob_required)
 {
        struct gpmi_nand_data *this = chip->priv;
        struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
  * this driver.
  */
 static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                               int page, int sndcmd)
+                               int page)
 {
        struct gpmi_nand_data *this = chip->priv;
 
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                chip->oob_poi[0] = chip->read_byte(mtd);
        }
 
-       /*
-        * Return true, indicating that the next call to this function must send
-        * a command.
-        */
-       return true;
+       return 0;
 }
 
 static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
                /* Write the first page of the current stride. */
                dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
                chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-               chip->ecc.write_page_raw(mtd, chip, buffer);
+               chip->ecc.write_page_raw(mtd, chip, buffer, 0);
                chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 
                /* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
        if (ret)
                return ret;
 
+       /* Adjust the ECC strength according to the chip. */
+       this->nand.ecc.strength = this->bch_geometry.ecc_strength;
+       this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+
        /* NAND boot init, depends on the gpmi_set_geometry(). */
        return nand_boot_init(this);
 }
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
 
 static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
 {
-       struct gpmi_nand_platform_data *pdata = this->pdata;
        struct mtd_info  *mtd = &this->mtd;
        struct nand_chip *chip = &this->nand;
+       struct mtd_part_parser_data ppdata = {};
        int ret;
 
        /* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
        chip->options           |= NAND_NO_SUBPAGE_WRITE;
        chip->ecc.mode          = NAND_ECC_HW;
        chip->ecc.size          = 1;
+       chip->ecc.strength      = 8;
        chip->ecc.layout        = &gpmi_hw_ecclayout;
 
        /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
        if (ret)
                goto err_out;
 
-       ret = nand_scan(mtd, pdata->max_chip_count);
+       ret = nand_scan(mtd, 1);
        if (ret) {
                pr_err("Chip scan failed\n");
                goto err_out;
        }
 
-       ret = mtd_device_parse_register(mtd, NULL, NULL,
-                       pdata->partitions, pdata->partition_count);
+       ppdata.of_node = this->pdev->dev.of_node;
+       ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
        if (ret)
                goto err_out;
        return 0;
@@ -1528,12 +1522,41 @@ err_out:
        return ret;
 }
 
+static const struct platform_device_id gpmi_ids[] = {
+       { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
+       { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
+       { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
+       {},
+};
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+       {
+               .compatible = "fsl,imx23-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX23]
+       }, {
+               .compatible = "fsl,imx28-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX28]
+       }, {
+               .compatible = "fsl,imx6q-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX6Q]
+       }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
 static int __devinit gpmi_nand_probe(struct platform_device *pdev)
 {
-       struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
        struct gpmi_nand_data *this;
+       const struct of_device_id *of_id;
        int ret;
 
+       of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+       if (of_id) {
+               pdev->id_entry = of_id->data;
+       } else {
+               pr_err("Failed to find the right device id.\n");
+               return -ENOMEM;
+       }
+
        this = kzalloc(sizeof(*this), GFP_KERNEL);
        if (!this) {
                pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, this);
        this->pdev  = pdev;
        this->dev   = &pdev->dev;
-       this->pdata = pdata;
-
-       if (pdata->platform_init) {
-               ret = pdata->platform_init();
-               if (ret)
-                       goto platform_init_error;
-       }
 
        ret = acquire_resources(this);
        if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
 
 exit_nfc_init:
        release_resources(this);
-platform_init_error:
 exit_acquire_resources:
        platform_set_drvdata(pdev, NULL);
        kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct platform_device_id gpmi_ids[] = {
-       {
-               .name = "imx23-gpmi-nand",
-               .driver_data = IS_MX23,
-       }, {
-               .name = "imx28-gpmi-nand",
-               .driver_data = IS_MX28,
-       }, {},
-};
-
 static struct platform_driver gpmi_nand_driver = {
        .driver = {
                .name = "gpmi-nand",
+               .of_match_table = gpmi_nand_id_table,
        },
        .probe   = gpmi_nand_probe,
        .remove  = __exit_p(gpmi_nand_remove),
index ec6180d4ff8ffa22314ce923802c606910492166..ce5daa1609203923caee19f576be884c27919a97 100644 (file)
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
 #define STATUS_UNCORRECTABLE   0xfe
 
 /* Use the platform_id to distinguish different Archs. */
-#define IS_MX23                        0x1
-#define IS_MX28                        0x2
+#define IS_MX23                        0x0
+#define IS_MX28                        0x1
+#define IS_MX6Q                        0x2
 #define GPMI_IS_MX23(x)                ((x)->pdev->id_entry->driver_data == IS_MX23)
 #define GPMI_IS_MX28(x)                ((x)->pdev->id_entry->driver_data == IS_MX28)
+#define GPMI_IS_MX6Q(x)                ((x)->pdev->id_entry->driver_data == IS_MX6Q)
 #endif
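
The probe change above looks up the device-tree compatible string and points pdev->id_entry at the matching platform_device_id entry, so the GPMI_IS_MX23/GPMI_IS_MX28/GPMI_IS_MX6Q macros keep working for both DT and non-DT boots. A minimal stand-alone sketch of that compatible-to-id-table mapping (plain C, all names hypothetical, not the kernel API):

/* Hypothetical sketch of the compatible -> id-table lookup done at probe time. */
#include <stdio.h>
#include <string.h>

enum { IS_MX23, IS_MX28, IS_MX6Q };

struct id_entry { const char *name; unsigned long driver_data; };
struct of_entry { const char *compatible; const struct id_entry *data; };

static const struct id_entry ids[] = {
        { "imx23-gpmi-nand", IS_MX23 },
        { "imx28-gpmi-nand", IS_MX28 },
        { "imx6q-gpmi-nand", IS_MX6Q },
};

static const struct of_entry of_table[] = {
        { "fsl,imx23-gpmi-nand", &ids[IS_MX23] },
        { "fsl,imx28-gpmi-nand", &ids[IS_MX28] },
        { "fsl,imx6q-gpmi-nand", &ids[IS_MX6Q] },
        { NULL, NULL },
};

static const struct id_entry *match(const char *compatible)
{
        const struct of_entry *e;

        for (e = of_table; e->compatible; e++)
                if (!strcmp(e->compatible, compatible))
                        return e->data;
        return NULL;
}

int main(void)
{
        const struct id_entry *id = match("fsl,imx28-gpmi-nand");

        if (id)
                printf("%s -> driver_data %lu\n", id->name, id->driver_data);
        return 0;
}
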
index 9bf5ce5fa22d0a5c6ad67d0800131efd27157ebb..50166e93ba96696e562d201c694dcdb51488bb14 100644 (file)
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
        /* 15 us command delay time */
        this->chip_delay = 50;
        this->ecc.mode = NAND_ECC_SOFT;
-       this->options = NAND_NO_AUTOINCR;
 
        /* Scan to find existence of the device */
        if (nand_scan(h1910_nand_mtd, 1)) {
index e4147e8acb7c560f2a8c3fdae68bf1027266fdba..a6fa884ae49bb08deba807ae6aeca7f0fcbabff7 100644 (file)
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
        chip->ecc.mode          = NAND_ECC_HW_OOB_FIRST;
        chip->ecc.size          = 512;
        chip->ecc.bytes         = 9;
-       chip->ecc.strength      = 2;
-       /*
-        * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
-        * conservative guess, given 9 ecc bytes and reed-solomon alg.
-        */
+       chip->ecc.strength      = 4;
 
        if (pdata)
                chip->ecc.layout = pdata->ecc_layout;
index c240cf1af96166f8c2431e029dc90064a443ecf4..c259c24d7986034f3a2d14e02d5c06fb9a19a323 100644 (file)
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
        chip->write_buf = mpc5121_nfc_write_buf;
        chip->verify_buf = mpc5121_nfc_verify_buf;
        chip->select_chip = mpc5121_nfc_select_chip;
-       chip->options = NAND_NO_AUTOINCR;
        chip->bbt_options = NAND_BBT_USE_FLASH;
        chip->ecc.mode = NAND_ECC_SOFT;
 
index 9e374e9bd2966bf197b348744c3e11c6b6fa1df4..6acc790c2fbb96880ec29642a1d2e7e2528dbee9 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
 
 #include <asm/mach/flash.h>
 #include <mach/mxc_nand.h>
 
 #define NFC_V3_DELAY_LINE              (host->regs_ip + 0x34)
 
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+       void (*preset)(struct mtd_info *);
+       void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+       void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+       void (*send_page)(struct mtd_info *, unsigned int);
+       void (*send_read_id)(struct mxc_nand_host *);
+       uint16_t (*get_dev_status)(struct mxc_nand_host *);
+       int (*check_int)(struct mxc_nand_host *);
+       void (*irq_control)(struct mxc_nand_host *, int);
+       u32 (*get_ecc_status)(struct mxc_nand_host *);
+       struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+       void (*select_chip)(struct mtd_info *mtd, int chip);
+       int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+                       u_char *read_ecc, u_char *calc_ecc);
+
+       /*
+        * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+        * (CONFIG1:INT_MSK is set). To handle this the driver uses
+        * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+        */
+       int irqpending_quirk;
+       int needs_ip;
+
+       size_t regs_offset;
+       size_t spare0_offset;
+       size_t axi_offset;
+
+       int spare_len;
+       int eccbytes;
+       int eccsize;
+};
+
 struct mxc_nand_host {
        struct mtd_info         mtd;
        struct nand_chip        nand;
        struct device           *dev;
 
-       void                    *spare0;
-       void                    *main_area0;
+       void __iomem            *spare0;
+       void __iomem            *main_area0;
 
        void __iomem            *base;
        void __iomem            *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
 
        uint8_t                 *data_buf;
        unsigned int            buf_start;
-       int                     spare_len;
-
-       void                    (*preset)(struct mtd_info *);
-       void                    (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
-       void                    (*send_addr)(struct mxc_nand_host *, uint16_t, int);
-       void                    (*send_page)(struct mtd_info *, unsigned int);
-       void                    (*send_read_id)(struct mxc_nand_host *);
-       uint16_t                (*get_dev_status)(struct mxc_nand_host *);
-       int                     (*check_int)(struct mxc_nand_host *);
-       void                    (*irq_control)(struct mxc_nand_host *, int);
+
+       const struct mxc_nand_devtype_data *devtype_data;
+       struct mxc_nand_platform_data pdata;
 };
 
 /* OOB placement block for use with hardware ecc generation */
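
The new struct mxc_nand_devtype_data above gathers every per-controller-generation difference (callbacks, register offsets, spare and ECC sizes) into one const table selected once at probe time, replacing the scattered nfc_is_v1()/nfc_is_v21()/cpu_is_mx21() checks. A minimal stand-alone sketch of that table-driven dispatch (names hypothetical):

/* Hypothetical sketch: one const ops/parameter table per controller
 * generation; call sites never test the version themselves. */
#include <stdio.h>

struct devtype_data {
        const char *name;
        int spare_len;
        void (*send_cmd)(int cmd);
};

static void send_cmd_v1_v2(int cmd) { printf("v1/v2 command 0x%02x\n", (unsigned int)cmd); }
static void send_cmd_v3(int cmd)    { printf("v3 command 0x%02x\n", (unsigned int)cmd); }

static const struct devtype_data imx27_data = { "imx27", 16, send_cmd_v1_v2 };
static const struct devtype_data imx51_data = { "imx51", 64, send_cmd_v3 };

struct host {
        const struct devtype_data *devtype_data;
};

int main(void)
{
        struct host host = { .devtype_data = &imx27_data };

        host.devtype_data->send_cmd(0xff);      /* dispatches to the v1/v2 helper */

        host.devtype_data = &imx51_data;        /* a different SoC, same call site */
        host.devtype_data->send_cmd(0xff);

        printf("spare_len: %d vs %d\n", imx27_data.spare_len, imx51_data.spare_len);
        return 0;
}
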
@@ -242,20 +271,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
        }
 };
 
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
 {
-       struct mxc_nand_host *host = dev_id;
-
-       if (!host->check_int(host))
-               return IRQ_NONE;
+       int i;
+       u32 *t = trg;
+       const __iomem u32 *s = src;
 
-       host->irq_control(host, 0);
+       for (i = 0; i < (size >> 2); i++)
+               *t++ = __raw_readl(s++);
+}
 
-       complete(&host->op_completion);
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+       int i;
+       u32 __iomem *t = trg;
+       const u32 *s = src;
 
-       return IRQ_HANDLED;
+       for (i = 0; i < (size >> 2); i++)
+               __raw_writel(*s++, t++);
 }
 
 static int check_int_v3(struct mxc_nand_host *host)
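
memcpy32_fromio()/memcpy32_toio() above replace plain memcpy() for the controller's internal buffer and copy one 32-bit word at a time via __raw_readl()/__raw_writel(); the assumption is that this buffer only tolerates 32-bit accesses. A rough userspace sketch of the same copy loop (hypothetical names, no real MMIO involved):

/* Hypothetical sketch: copy word-by-word instead of byte-by-byte. */
#include <stdint.h>
#include <stdio.h>

static void copy32_from(void *trg, const volatile uint32_t *src, size_t size)
{
        uint32_t *t = trg;
        size_t i;

        for (i = 0; i < (size >> 2); i++)
                *t++ = src[i];  /* one 32-bit read per word; __raw_readl() in the driver */
}

int main(void)
{
        uint32_t sram[4] = { 1, 2, 3, 4 };      /* stands in for the controller buffer */
        uint32_t buf[4] = { 0 };

        copy32_from(buf, sram, sizeof(buf));
        printf("%u %u %u %u\n", (unsigned)buf[0], (unsigned)buf[1],
               (unsigned)buf[2], (unsigned)buf[3]);
        return 0;
}
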
@@ -280,26 +315,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
        if (!(tmp & NFC_V1_V2_CONFIG2_INT))
                return 0;
 
-       if (!cpu_is_mx21())
+       if (!host->devtype_data->irqpending_quirk)
                writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
 
        return 1;
 }
 
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
-       if (activate)
-               enable_irq(host->irq);
-       else
-               disable_irq_nosync(host->irq);
-}
-
 static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
 {
        uint16_t tmp;
@@ -328,6 +349,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
        writel(tmp, NFC_V3_CONFIG2);
 }
 
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+       if (host->devtype_data->irqpending_quirk) {
+               if (activate)
+                       enable_irq(host->irq);
+               else
+                       disable_irq_nosync(host->irq);
+       } else {
+               host->devtype_data->irq_control(host, activate);
+       }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+       return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+       return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+       return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+       struct mxc_nand_host *host = dev_id;
+
+       if (!host->devtype_data->check_int(host))
+               return IRQ_NONE;
+
+       irq_control(host, 0);
+
+       complete(&host->op_completion);
+
+       return IRQ_HANDLED;
+}
+
 /* This function polls the NANDFC to wait for the basic operation to
  * complete by checking the INT bit of config2 register.
  */
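
irq_control() above wraps the per-generation mask routine and, on parts with irqpending_quirk set (i.MX21, whose CONFIG2:INT bit cannot be read while interrupts are masked), toggles the interrupt line with enable_irq()/disable_irq_nosync() instead. A small stand-alone sketch of that selection (names hypothetical):

/* Hypothetical sketch of the quirk-aware wrapper. */
#include <stdio.h>

struct devtype_data {
        int irqpending_quirk;
        void (*irq_control)(int activate);
};

struct host {
        const struct devtype_data *devtype_data;
};

static void mask_v1_v2(int activate)  { printf("mask bit %s\n", activate ? "cleared" : "set"); }
static void line_toggle(int activate) { printf("irq line %s\n", activate ? "enabled" : "disabled"); }

static void irq_control(struct host *host, int activate)
{
        if (host->devtype_data->irqpending_quirk)
                line_toggle(activate);  /* enable_irq()/disable_irq_nosync() in the driver */
        else
                host->devtype_data->irq_control(activate);
}

int main(void)
{
        const struct devtype_data imx21 = { 1, mask_v1_v2 };
        const struct devtype_data imx27 = { 0, mask_v1_v2 };
        struct host h21 = { &imx21 }, h27 = { &imx27 };

        irq_control(&h21, 0);
        irq_control(&h27, 0);
        return 0;
}
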
@@ -336,14 +398,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
        int max_retries = 8000;
 
        if (useirq) {
-               if (!host->check_int(host)) {
+               if (!host->devtype_data->check_int(host)) {
                        INIT_COMPLETION(host->op_completion);
-                       host->irq_control(host, 1);
+                       irq_control(host, 1);
                        wait_for_completion(&host->op_completion);
                }
        } else {
                while (max_retries-- > 0) {
-                       if (host->check_int(host))
+                       if (host->devtype_data->check_int(host))
                                break;
 
                        udelay(1);
@@ -374,7 +436,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
        writew(cmd, NFC_V1_V2_FLASH_CMD);
        writew(NFC_CMD, NFC_V1_V2_CONFIG2);
 
-       if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+       if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
                int max_retries = 100;
                /* Reset completion is indicated by NFC_CONFIG2 */
                /* being set to 0 */
@@ -433,13 +495,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
        wait_op_done(host, false);
 }
 
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+
+       /* NANDFC buffer 0 is used for page read/write */
+       writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+       writew(ops, NFC_V1_V2_CONFIG2);
+
+       /* Wait for operation to complete */
+       wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
        int bufs, i;
 
-       if (nfc_is_v1() && mtd->writesize > 512)
+       if (mtd->writesize > 512)
                bufs = 4;
        else
                bufs = 1;
@@ -463,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
 
        wait_op_done(host, true);
 
-       memcpy(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
-       memcpy(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 
        if (this->options & NAND_BUSWIDTH_16) {
                /* compress the ID info */
@@ -555,7 +631,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
         * additional correction.  2-Bit errors cannot be corrected by
         * HW ECC, so we need to return failure
         */
-       uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+       uint16_t ecc_status = get_ecc_status_v1(host);
 
        if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
                pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +656,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
 
        no_subpages = mtd->writesize >> 9;
 
-       if (nfc_is_v21())
-               ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
-       else
-               ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+       ecc_stat = host->devtype_data->get_ecc_status(host);
 
        do {
                err = ecc_stat & ecc_bit_mask;
@@ -616,7 +689,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
 
        /* Check for status request */
        if (host->status_request)
-               return host->get_dev_status(host) & 0xFF;
+               return host->devtype_data->get_dev_status(host) & 0xFF;
 
        ret = *(uint8_t *)(host->data_buf + host->buf_start);
        host->buf_start++;
@@ -682,7 +755,7 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
 
 /* This function is used by upper layer for select and
  * deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
@@ -701,11 +774,30 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
                clk_prepare_enable(host->clk);
                host->clk_act = 1;
        }
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+
+       if (chip == -1) {
+               /* Disable the NFC clock */
+               if (host->clk_act) {
+                       clk_disable(host->clk);
+                       host->clk_act = 0;
+               }
+               return;
+       }
 
-       if (nfc_is_v21()) {
-               host->active_cs = chip;
-               writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+       if (!host->clk_act) {
+               /* Enable the NFC clock */
+               clk_enable(host->clk);
+               host->clk_act = 1;
        }
+
+       host->active_cs = chip;
+       writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
 }
 
 /*
@@ -718,23 +810,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
        u16 i, j;
        u16 n = mtd->writesize >> 9;
        u8 *d = host->data_buf + mtd->writesize;
-       u8 *s = host->spare0;
-       u16 t = host->spare_len;
+       u8 __iomem *s = host->spare0;
+       u16 t = host->devtype_data->spare_len;
 
        j = (mtd->oobsize / n >> 1) << 1;
 
        if (bfrom) {
                for (i = 0; i < n - 1; i++)
-                       memcpy(d + i * j, s + i * t, j);
+                       memcpy32_fromio(d + i * j, s + i * t, j);
 
                /* the last section */
-               memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+               memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
        } else {
                for (i = 0; i < n - 1; i++)
-                       memcpy(&s[i * t], &d[i * j], j);
+                       memcpy32_toio(&s[i * t], &d[i * j], j);
 
                /* the last section */
-               memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+               memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
        }
 }
 
@@ -751,34 +843,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
                 * perform a read/write buf operation, the saved column
                  * address is used to index into the full page.
                 */
-               host->send_addr(host, 0, page_addr == -1);
+               host->devtype_data->send_addr(host, 0, page_addr == -1);
                if (mtd->writesize > 512)
                        /* another col addr cycle for 2k page */
-                       host->send_addr(host, 0, false);
+                       host->devtype_data->send_addr(host, 0, false);
        }
 
        /* Write out page address, if necessary */
        if (page_addr != -1) {
                /* paddr_0 - p_addr_7 */
-               host->send_addr(host, (page_addr & 0xff), false);
+               host->devtype_data->send_addr(host, (page_addr & 0xff), false);
 
                if (mtd->writesize > 512) {
                        if (mtd->size >= 0x10000000) {
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, false);
-                               host->send_addr(host, (page_addr >> 16) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff,
+                                               false);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 16) & 0xff,
+                                               true);
                        } else
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff, true);
                } else {
                        /* One more address cycle for higher density devices */
                        if (mtd->size >= 0x4000000) {
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, false);
-                               host->send_addr(host, (page_addr >> 16) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff,
+                                               false);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 16) & 0xff,
+                                               true);
                        } else
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff, true);
                }
        }
 }
@@ -800,7 +902,35 @@ static int get_eccsize(struct mtd_info *mtd)
                return 8;
 }
 
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+       uint16_t config1 = 0;
+
+       if (nand_chip->ecc.mode == NAND_ECC_HW)
+               config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+       if (!host->devtype_data->irqpending_quirk)
+               config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+       host->eccsize = 1;
+
+       writew(config1, NFC_V1_V2_CONFIG1);
+       /* preset operation */
+
+       /* Unlock the internal RAM Buffer */
+       writew(0x2, NFC_V1_V2_CONFIG);
+
+       /* Blocks to be unlocked */
+       writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+       writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+       /* Unlock Block Command for given address range */
+       writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +939,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
        if (nand_chip->ecc.mode == NAND_ECC_HW)
                config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
 
-       if (nfc_is_v21())
-               config1 |= NFC_V2_CONFIG1_FP_INT;
+       config1 |= NFC_V2_CONFIG1_FP_INT;
 
-       if (!cpu_is_mx21())
+       if (!host->devtype_data->irqpending_quirk)
                config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
 
-       if (nfc_is_v21() && mtd->writesize) {
+       if (mtd->writesize) {
                uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
 
                host->eccsize = get_eccsize(mtd);
@@ -834,20 +963,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
        writew(0x2, NFC_V1_V2_CONFIG);
 
        /* Blocks to be unlocked */
-       if (nfc_is_v21()) {
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
-       } else if (nfc_is_v1()) {
-               writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-               writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
-       } else
-               BUG();
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
 
        /* Unlock Block Command for given address range */
        writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1060,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
        /* Command pre-processing step */
        switch (command) {
        case NAND_CMD_RESET:
-               host->preset(mtd);
-               host->send_cmd(host, command, false);
+               host->devtype_data->preset(mtd);
+               host->devtype_data->send_cmd(host, command, false);
                break;
 
        case NAND_CMD_STATUS:
                host->buf_start = 0;
                host->status_request = true;
 
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
@@ -958,15 +1081,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                command = NAND_CMD_READ0; /* only READ0 is valid */
 
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
 
                if (mtd->writesize > 512)
-                       host->send_cmd(host, NAND_CMD_READSTART, true);
+                       host->devtype_data->send_cmd(host,
+                                       NAND_CMD_READSTART, true);
 
-               host->send_page(mtd, NFC_OUTPUT);
+               host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-               memcpy(host->data_buf, host->main_area0, mtd->writesize);
+               memcpy32_fromio(host->data_buf, host->main_area0,
+                               mtd->writesize);
                copy_spare(mtd, true);
                break;
 
@@ -977,28 +1102,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                host->buf_start = column;
 
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
        case NAND_CMD_PAGEPROG:
-               memcpy(host->main_area0, host->data_buf, mtd->writesize);
+               memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
                copy_spare(mtd, false);
-               host->send_page(mtd, NFC_INPUT);
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_page(mtd, NFC_INPUT);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
        case NAND_CMD_READID:
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
-               host->send_read_id(host);
+               host->devtype_data->send_read_id(host);
                host->buf_start = column;
                break;
 
        case NAND_CMD_ERASE1:
        case NAND_CMD_ERASE2:
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
 
                break;
@@ -1032,15 +1157,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
        .pattern = mirror_pattern,
 };
 
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+       .preset = preset_v1,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v1,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v1,
+       .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v1,
+       .irqpending_quirk = 1,
+       .needs_ip = 0,
+       .regs_offset = 0xe00,
+       .spare0_offset = 0x800,
+       .spare_len = 16,
+       .eccbytes = 3,
+       .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+       .preset = preset_v1,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v1,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v1,
+       .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v1,
+       .irqpending_quirk = 0,
+       .needs_ip = 0,
+       .regs_offset = 0xe00,
+       .spare0_offset = 0x800,
+       .axi_offset = 0,
+       .spare_len = 16,
+       .eccbytes = 3,
+       .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+       .preset = preset_v2,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v2,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v2,
+       .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv2_hw_eccoob_4k,
+       .select_chip = mxc_nand_select_chip_v2,
+       .correct_data = mxc_nand_correct_data_v2_v3,
+       .irqpending_quirk = 0,
+       .needs_ip = 0,
+       .regs_offset = 0x1e00,
+       .spare0_offset = 0x1000,
+       .axi_offset = 0,
+       .spare_len = 64,
+       .eccbytes = 9,
+       .eccsize = 0,
+};
+
+/* v3: i.MX51, i.MX53 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+       .preset = preset_v3,
+       .send_cmd = send_cmd_v3,
+       .send_addr = send_addr_v3,
+       .send_page = send_page_v3,
+       .send_read_id = send_read_id_v3,
+       .get_dev_status = get_dev_status_v3,
+       .check_int = check_int_v3,
+       .irq_control = irq_control_v3,
+       .get_ecc_status = get_ecc_status_v3,
+       .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v2_v3,
+       .irqpending_quirk = 0,
+       .needs_ip = 1,
+       .regs_offset = 0,
+       .spare0_offset = 0x1000,
+       .axi_offset = 0x1e00,
+       .spare_len = 64,
+       .eccbytes = 0,
+       .eccsize = 0,
+};
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+       {
+               .compatible = "fsl,imx21-nand",
+               .data = &imx21_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx27-nand",
+               .data = &imx27_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx25-nand",
+               .data = &imx25_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx51-nand",
+               .data = &imx51_nand_devtype_data,
+       },
+       { /* sentinel */ }
+};
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+       struct device_node *np = host->dev->of_node;
+       struct mxc_nand_platform_data *pdata = &host->pdata;
+       const struct of_device_id *of_id =
+               of_match_device(mxcnd_dt_ids, host->dev);
+       int buswidth;
+
+       if (!np)
+               return 1;
+
+       if (of_get_nand_ecc_mode(np) >= 0)
+               pdata->hw_ecc = 1;
+
+       pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+       buswidth = of_get_nand_bus_width(np);
+       if (buswidth < 0)
+               return buswidth;
+
+       pdata->width = buswidth / 8;
+
+       host->devtype_data = of_id->data;
+
+       return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+       return 1;
+}
+#endif
+
+static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
+{
+       struct mxc_nand_platform_data *pdata = host->dev->platform_data;
+
+       if (!pdata)
+               return -ENODEV;
+
+       host->pdata = *pdata;
+
+       if (nfc_is_v1()) {
+               if (cpu_is_mx21())
+                       host->devtype_data = &imx21_nand_devtype_data;
+               else
+                       host->devtype_data = &imx27_nand_devtype_data;
+       } else if (nfc_is_v21()) {
+               host->devtype_data = &imx25_nand_devtype_data;
+       } else if (nfc_is_v3_2()) {
+               host->devtype_data = &imx51_nand_devtype_data;
+       } else
+               BUG();
+
+       return 0;
+}
+
 static int __init mxcnd_probe(struct platform_device *pdev)
 {
        struct nand_chip *this;
        struct mtd_info *mtd;
-       struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
        struct mxc_nand_host *host;
        struct resource *res;
        int err = 0;
-       struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
        /* Allocate memory for MTD device structure and private data */
        host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
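
mxcnd_probe_dt() and mxcnd_probe_pdata() above give the probe path a device-tree-first policy: a positive return from the DT helper means "no DT description, fall back to platform data", while a negative value aborts the probe. A minimal sketch of that convention (hypothetical names):

/* Hypothetical sketch: >0 = try the next source, <0 = hard error, 0 = success. */
#include <stdio.h>

static int probe_dt(int have_dt_node)  { return have_dt_node ? 0 : 1; }
static int probe_pdata(int have_pdata) { return have_pdata ? 0 : -1; /* -ENODEV in the driver */ }

static int probe(int have_dt_node, int have_pdata)
{
        int err = probe_dt(have_dt_node);

        if (err > 0)            /* no DT description, fall back to platform data */
                err = probe_pdata(have_pdata);
        return err;             /* <0 aborts the probe */
}

int main(void)
{
        printf("dt only:    %d\n", probe(1, 0));
        printf("pdata only: %d\n", probe(0, 1));
        printf("neither:    %d\n", probe(0, 0));
        return 0;
}
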
@@ -1065,7 +1366,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        this->priv = host;
        this->dev_ready = mxc_nand_dev_ready;
        this->cmdfunc = mxc_nand_command;
-       this->select_chip = mxc_nand_select_chip;
        this->read_byte = mxc_nand_read_byte;
        this->read_word = mxc_nand_read_word;
        this->write_buf = mxc_nand_write_buf;
@@ -1095,36 +1395,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
 
        host->main_area0 = host->base;
 
-       if (nfc_is_v1() || nfc_is_v21()) {
-               host->preset = preset_v1_v2;
-               host->send_cmd = send_cmd_v1_v2;
-               host->send_addr = send_addr_v1_v2;
-               host->send_page = send_page_v1_v2;
-               host->send_read_id = send_read_id_v1_v2;
-               host->get_dev_status = get_dev_status_v1_v2;
-               host->check_int = check_int_v1_v2;
-               if (cpu_is_mx21())
-                       host->irq_control = irq_control_mx21;
-               else
-                       host->irq_control = irq_control_v1_v2;
-       }
+       err = mxcnd_probe_dt(host);
+       if (err > 0)
+               err = mxcnd_probe_pdata(host);
+       if (err < 0)
+               goto eirq;
 
-       if (nfc_is_v21()) {
-               host->regs = host->base + 0x1e00;
-               host->spare0 = host->base + 0x1000;
-               host->spare_len = 64;
-               oob_smallpage = &nandv2_hw_eccoob_smallpage;
-               oob_largepage = &nandv2_hw_eccoob_largepage;
-               this->ecc.bytes = 9;
-       } else if (nfc_is_v1()) {
-               host->regs = host->base + 0xe00;
-               host->spare0 = host->base + 0x800;
-               host->spare_len = 16;
-               oob_smallpage = &nandv1_hw_eccoob_smallpage;
-               oob_largepage = &nandv1_hw_eccoob_largepage;
-               this->ecc.bytes = 3;
-               host->eccsize = 1;
-       } else if (nfc_is_v3_2()) {
+       if (host->devtype_data->regs_offset)
+               host->regs = host->base + host->devtype_data->regs_offset;
+       host->spare0 = host->base + host->devtype_data->spare0_offset;
+       if (host->devtype_data->axi_offset)
+               host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+       this->ecc.bytes = host->devtype_data->eccbytes;
+       host->eccsize = host->devtype_data->eccsize;
+
+       this->select_chip = host->devtype_data->select_chip;
+       this->ecc.size = 512;
+       this->ecc.layout = host->devtype_data->ecclayout_512;
+
+       if (host->devtype_data->needs_ip) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!res) {
                        err = -ENODEV;
@@ -1135,42 +1425,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                        err = -ENOMEM;
                        goto eirq;
                }
-               host->regs_axi = host->base + 0x1e00;
-               host->spare0 = host->base + 0x1000;
-               host->spare_len = 64;
-               host->preset = preset_v3;
-               host->send_cmd = send_cmd_v3;
-               host->send_addr = send_addr_v3;
-               host->send_page = send_page_v3;
-               host->send_read_id = send_read_id_v3;
-               host->check_int = check_int_v3;
-               host->get_dev_status = get_dev_status_v3;
-               host->irq_control = irq_control_v3;
-               oob_smallpage = &nandv2_hw_eccoob_smallpage;
-               oob_largepage = &nandv2_hw_eccoob_largepage;
-       } else
-               BUG();
-
-       this->ecc.size = 512;
-       this->ecc.layout = oob_smallpage;
+       }
 
-       if (pdata->hw_ecc) {
+       if (host->pdata.hw_ecc) {
                this->ecc.calculate = mxc_nand_calculate_ecc;
                this->ecc.hwctl = mxc_nand_enable_hwecc;
-               if (nfc_is_v1())
-                       this->ecc.correct = mxc_nand_correct_data_v1;
-               else
-                       this->ecc.correct = mxc_nand_correct_data_v2_v3;
+               this->ecc.correct = host->devtype_data->correct_data;
                this->ecc.mode = NAND_ECC_HW;
        } else {
                this->ecc.mode = NAND_ECC_SOFT;
        }
 
-       /* NAND bus width determines access funtions used by upper layer */
-       if (pdata->width == 2)
+       /* NAND bus width determines access functions used by upper layer */
+       if (host->pdata.width == 2)
                this->options |= NAND_BUSWIDTH_16;
 
-       if (pdata->flash_bbt) {
+       if (host->pdata.flash_bbt) {
                this->bbt_td = &bbt_main_descr;
                this->bbt_md = &bbt_mirror_descr;
                /* update flash based bbt */
@@ -1182,28 +1452,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        host->irq = platform_get_irq(pdev, 0);
 
        /*
-        * mask the interrupt. For i.MX21 explicitely call
-        * irq_control_v1_v2 to use the mask bit. We can't call
-        * disable_irq_nosync() for an interrupt we do not own yet.
+        * Use host->devtype_data->irq_control() here instead of irq_control()
+        * because we must not disable_irq_nosync without having requested the
+        * irq.
         */
-       if (cpu_is_mx21())
-               irq_control_v1_v2(host, 0);
-       else
-               host->irq_control(host, 0);
+       host->devtype_data->irq_control(host, 0);
 
        err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
        if (err)
                goto eirq;
 
-       host->irq_control(host, 0);
-
        /*
-        * Now that the interrupt is disabled make sure the interrupt
-        * mask bit is cleared on i.MX21. Otherwise we can't read
-        * the interrupt status bit on this machine.
+        * Now that we "own" the interrupt make sure the interrupt mask bit is
+        * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+        * on this machine.
         */
-       if (cpu_is_mx21())
-               irq_control_v1_v2(host, 1);
+       if (host->devtype_data->irqpending_quirk) {
+               disable_irq_nosync(host->irq);
+               host->devtype_data->irq_control(host, 1);
+       }
 
        /* first scan to find the device and get the page size */
        if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1479,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        }
 
        /* Call preset again, with correct writesize this time */
-       host->preset(mtd);
+       host->devtype_data->preset(mtd);
 
        if (mtd->writesize == 2048)
-               this->ecc.layout = oob_largepage;
-       if (nfc_is_v21() && mtd->writesize == 4096)
-               this->ecc.layout = &nandv2_hw_eccoob_4k;
-
-       /* second phase scan */
-       if (nand_scan_tail(mtd)) {
-               err = -ENXIO;
-               goto escan;
-       }
+               this->ecc.layout = host->devtype_data->ecclayout_2k;
+       else if (mtd->writesize == 4096)
+               this->ecc.layout = host->devtype_data->ecclayout_4k;
 
        if (this->ecc.mode == NAND_ECC_HW) {
                if (nfc_is_v1())
@@ -1232,9 +1493,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                        this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
        }
 
+       /* second phase scan */
+       if (nand_scan_tail(mtd)) {
+               err = -ENXIO;
+               goto escan;
+       }
+
        /* Register the partitions */
-       mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
-                                 pdata->nr_parts);
+       mtd_device_parse_register(mtd, part_probes,
+                       &(struct mtd_part_parser_data){
+                               .of_node = pdev->dev.of_node,
+                       },
+                       host->pdata.parts,
+                       host->pdata.nr_parts);
 
        platform_set_drvdata(pdev, host);
 
@@ -1275,6 +1546,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
 static struct platform_driver mxcnd_driver = {
        .driver = {
                   .name = DRIVER_NAME,
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(mxcnd_dt_ids),
        },
        .remove = __devexit_p(mxcnd_remove),
 };
index 47b19c0bb070e3da612b031848496f7551c2589f..a11253a0fcabd6ef7362b9fcae4972ba06fc4966 100644 (file)
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                             uint8_t *buf, int page)
+                             uint8_t *buf, int oob_required, int page)
 {
        chip->read_buf(mtd, buf, mtd->writesize);
-       chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
        return 0;
 }
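
The new oob_required parameter lets callers that supplied no OOB buffer skip the extra OOB transfer; nand_do_read_ops() below derives it from whether ops->oobbuf was set. A tiny sketch of the idea (hypothetical names):

/* Hypothetical sketch of the oob_required gate. */
#include <stdio.h>

static void read_buf(const char *what) { printf("read %s\n", what); }

static int read_page_raw(int oob_required)
{
        read_buf("main area");
        if (oob_required)
                read_buf("oob area");
        return 0;
}

int main(void)
{
        char *oobbuf = NULL;                    /* no OOB buffer supplied by the caller */
        int oob_required = oobbuf ? 1 : 0;      /* as in nand_do_read_ops() */

        return read_page_raw(oob_required);
}
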
 
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * We need a special oob layout and handling even when OOB isn't used.
  */
 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
-                                       struct nand_chip *chip,
-                                       uint8_t *buf, int page)
+                                      struct nand_chip *chip, uint8_t *buf,
+                                      int oob_required, int page)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  */
 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
        uint8_t *ecc_calc = chip->buffers->ecccalc;
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
+       unsigned int max_bitflips = 0;
 
-       chip->ecc.read_page_raw(mtd, chip, buf, page);
+       chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
                int stat;
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
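
The read-path changes above switch the page-read methods from returning 0 to returning the maximum number of bitflips corrected in any single ECC step (negative values still mean hard errors), which the caller then folds into its own max_bitflips. A small stand-alone sketch of that accumulation (hypothetical names):

/* Hypothetical sketch of the new return convention for read_page methods. */
#include <stdio.h>

/* stand-in for chip->ecc.correct(): >=0 bitflips corrected, <0 uncorrectable */
static int correct_step(int flips) { return flips; }

static int read_page(const int *flips_per_step, int steps,
                     unsigned int *failed, unsigned int *corrected)
{
        unsigned int max_bitflips = 0;
        int i;

        for (i = 0; i < steps; i++) {
                int stat = correct_step(flips_per_step[i]);

                if (stat < 0) {
                        (*failed)++;
                } else {
                        *corrected += stat;
                        if ((unsigned int)stat > max_bitflips)
                                max_bitflips = stat;
                }
        }
        return max_bitflips;    /* was "return 0" before this change */
}

int main(void)
{
        int flips[] = { 0, 2, 1, 3 };
        unsigned int failed = 0, corrected = 0;
        int max = read_page(flips, 4, &failed, &corrected);

        printf("max bitflips %d, failed %u, corrected %u\n", max, failed, corrected);
        return 0;
}
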
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
        int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
        int index = 0;
+       unsigned int max_bitflips = 0;
 
        /* Column address within the page aligned to ECC size (256bytes) */
        start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 
                stat = chip->ecc.correct(mtd, p,
                        &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers which need a special oob layout.
  */
 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
        uint8_t *ecc_calc = chip->buffers->ecccalc;
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
+       unsigned int max_bitflips = 0;
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
                chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                int stat;
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * the data area, by overwriting the NAND manufacturer bad block markings.
  */
 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-       struct nand_chip *chip, uint8_t *buf, int page)
+       struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
        uint8_t *ecc_calc = chip->buffers->ecccalc;
+       unsigned int max_bitflips = 0;
 
        /* Read the OOB area first */
        chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-                                  uint8_t *buf, int page)
+                                  uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
        int eccsteps = chip->ecc.steps;
        uint8_t *p = buf;
        uint8_t *oob = chip->oob_poi;
+       unsigned int max_bitflips = 0;
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
                int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
                chip->read_buf(mtd, oob, eccbytes);
                stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
 
                oob += eccbytes;
 
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
        if (i)
                chip->read_buf(mtd, oob, i);
 
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
 {
-       int chipnr, page, realpage, col, bytes, aligned;
+       int chipnr, page, realpage, col, bytes, aligned, oob_required;
        struct nand_chip *chip = mtd->priv;
        struct mtd_ecc_stats stats;
-       int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
-       int sndcmd = 1;
        int ret = 0;
        uint32_t readlen = ops->len;
        uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                mtd->oobavail : mtd->oobsize;
 
        uint8_t *bufpoi, *oob, *buf;
+       unsigned int max_bitflips = 0;
 
        stats = mtd->ecc_stats;
 
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 
        buf = ops->datbuf;
        oob = ops->oobbuf;
+       oob_required = oob ? 1 : 0;
 
        while (1) {
                bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                if (realpage != chip->pagebuf || oob) {
                        bufpoi = aligned ? buf : chip->buffers->databuf;
 
-                       if (likely(sndcmd)) {
-                               chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-                               sndcmd = 0;
-                       }
+                       chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
-                       /* Now read the page into the buffer */
+                       /*
+                        * Now read the page into the buffer.  Absent an error,
+                        * the read methods return max bitflips per ecc step.
+                        */
                        if (unlikely(ops->mode == MTD_OPS_RAW))
-                               ret = chip->ecc.read_page_raw(mtd, chip,
-                                                             bufpoi, page);
+                               ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+                                                             oob_required,
+                                                             page);
                        else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
                                ret = chip->ecc.read_subpage(mtd, chip,
                                                        col, bytes, bufpoi);
                        else
                                ret = chip->ecc.read_page(mtd, chip, bufpoi,
-                                                         page);
+                                                         oob_required, page);
                        if (ret < 0) {
                                if (!aligned)
                                        /* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                                break;
                        }
 
+                       max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
                        /* Transfer not aligned data */
                        if (!aligned) {
                                if (!NAND_SUBPAGE_READ(chip) && !oob &&
                                    !(mtd->ecc_stats.failed - stats.failed) &&
-                                   (ops->mode != MTD_OPS_RAW))
+                                   (ops->mode != MTD_OPS_RAW)) {
                                        chip->pagebuf = realpage;
-                               else
+                                       chip->pagebuf_bitflips = ret;
+                               } else {
                                        /* Invalidate page cache */
                                        chip->pagebuf = -1;
+                               }
                                memcpy(buf, chip->buffers->databuf + col, bytes);
                        }
 
                        buf += bytes;
 
                        if (unlikely(oob)) {
-
                                int toread = min(oobreadlen, max_oobsize);
 
                                if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                        }
 
                        if (!(chip->options & NAND_NO_READRDY)) {
-                               /*
-                                * Apply delay or wait for ready/busy pin. Do
-                                * this before the AUTOINCR check, so no
-                                * problems arise if a chip which does auto
-                                * increment is marked as NOAUTOINCR by the
-                                * board driver.
-                                */
+                               /* Apply delay or wait for ready/busy pin */
                                if (!chip->dev_ready)
                                        udelay(chip->chip_delay);
                                else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                } else {
                        memcpy(buf, chip->buffers->databuf + col, bytes);
                        buf += bytes;
+                       max_bitflips = max_t(unsigned int, max_bitflips,
+                                            chip->pagebuf_bitflips);
                }
 
                readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                        chip->select_chip(mtd, -1);
                        chip->select_chip(mtd, chipnr);
                }
-
-               /*
-                * Check, if the chip supports auto page increment or if we
-                * have hit a block boundary.
-                */
-               if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-                       sndcmd = 1;
        }
 
        ops->retlen = ops->len - (size_t) readlen;
        if (oob)
                ops->oobretlen = ops->ooblen - oobreadlen;
 
-       if (ret)
+       if (ret < 0)
                return ret;
 
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       return max_bitflips;
 }
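With nand_do_read_ops() returning the worst-case bitflip count instead of a bare -EUCLEAN, the "is this block still healthy?" decision moves up to the generic MTD read path, which can weigh that count against mtd->bitflip_threshold (initialized later in this series to default to the ECC strength). A minimal caller-side sketch of the new convention, not the verbatim mtdcore code:

	/* ret is what the NAND read path now returns:
	 *   < 0  -> hard error (e.g. -EBADMSG when ECC could not correct)
	 *   >= 0 -> max bitflips observed in any single ECC step of this read
	 */
	ret = nand_read(mtd, from, len, &retlen, buf);
	if (ret < 0)
		return ret;
	/* Ask for scrubbing once the flip count approaches the ECC strength. */
	return ret >= mtd->bitflip_threshold ? -EUCLEAN : 0;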
 
 /**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-                            int page, int sndcmd)
+                            int page)
 {
-       if (sndcmd) {
-               chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-               sndcmd = 0;
-       }
+       chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return sndcmd;
+       return 0;
 }
 
 /**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-                                 int page, int sndcmd)
+                                 int page)
 {
        uint8_t *buf = chip->oob_poi;
        int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
        if (length > 0)
                chip->read_buf(mtd, bufpoi, length);
 
-       return 1;
+       return 0;
 }
 
 /**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
 {
-       int page, realpage, chipnr, sndcmd = 1;
+       int page, realpage, chipnr;
        struct nand_chip *chip = mtd->priv;
        struct mtd_ecc_stats stats;
-       int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
        int readlen = ops->ooblen;
        int len;
        uint8_t *buf = ops->oobbuf;
+       int ret = 0;
 
        pr_debug("%s: from = 0x%08Lx, len = %i\n",
                        __func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
 
        while (1) {
                if (ops->mode == MTD_OPS_RAW)
-                       sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+                       ret = chip->ecc.read_oob_raw(mtd, chip, page);
                else
-                       sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+                       ret = chip->ecc.read_oob(mtd, chip, page);
+
+               if (ret < 0)
+                       break;
 
                len = min(len, readlen);
                buf = nand_transfer_oob(chip, buf, ops, len);
 
                if (!(chip->options & NAND_NO_READRDY)) {
-                       /*
-                        * Apply delay or wait for ready/busy pin. Do this
-                        * before the AUTOINCR check, so no problems arise if a
-                        * chip which does auto increment is marked as
-                        * NOAUTOINCR by the board driver.
-                        */
+                       /* Apply delay or wait for ready/busy pin */
                        if (!chip->dev_ready)
                                udelay(chip->chip_delay);
                        else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
                        chip->select_chip(mtd, -1);
                        chip->select_chip(mtd, chipnr);
                }
-
-               /*
-                * Check, if the chip supports auto page increment or if we
-                * have hit a block boundary.
-                */
-               if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-                       sndcmd = 1;
        }
 
-       ops->oobretlen = ops->ooblen;
+       ops->oobretlen = ops->ooblen - readlen;
+
+       if (ret < 0)
+               return ret;
 
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf)
+                               const uint8_t *buf, int oob_required)
 {
        chip->write_buf(mtd, buf, mtd->writesize);
-       chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 /**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * We need a special oob layout and handling even when ECC isn't checked.
  */
 static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
                                        struct nand_chip *chip,
-                                       const uint8_t *buf)
+                                       const uint8_t *buf, int oob_required)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                 const uint8_t *buf)
+                                 const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
        for (i = 0; i < chip->ecc.total; i++)
                chip->oob_poi[eccpos[i]] = ecc_calc[i];
 
-       chip->ecc.write_page_raw(mtd, chip, buf);
+       chip->ecc.write_page_raw(mtd, chip, buf, 1);
 }
 
 /**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                 const uint8_t *buf)
+                                 const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static void nand_write_page_syndrome(struct mtd_info *mtd,
-                                   struct nand_chip *chip, const uint8_t *buf)
+                                   struct nand_chip *chip,
+                                   const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
  * @mtd: MTD device structure
  * @chip: NAND chip descriptor
  * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
  * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                          const uint8_t *buf, int page, int cached, int raw)
+                          const uint8_t *buf, int oob_required, int page,
+                          int cached, int raw)
 {
        int status;
 
        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
        if (unlikely(raw))
-               chip->ecc.write_page_raw(mtd, chip, buf);
+               chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
        else
-               chip->ecc.write_page(mtd, chip, buf);
+               chip->ecc.write_page(mtd, chip, buf, oob_required);
 
        /*
         * Cached programming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 
        if (chip->verify_buf(mtd, buf, mtd->writesize))
                return -EIO;
+
+       /* Make sure the next page prog is preceded by a status read */
+       chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 #endif
        return 0;
 }
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
        uint8_t *oob = ops->oobbuf;
        uint8_t *buf = ops->datbuf;
        int ret, subpage;
+       int oob_required = oob ? 1 : 0;
 
        ops->retlen = 0;
        if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
                        memset(chip->oob_poi, 0xff, mtd->oobsize);
                }
 
-               ret = chip->write_page(mtd, chip, wbuf, page, cached,
-                                      (ops->mode == MTD_OPS_RAW));
+               ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
+                                      cached, (ops->mode == MTD_OPS_RAW));
                if (ret)
                        break;
 
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
                *busw = NAND_BUSWIDTH_16;
 
        chip->options &= ~NAND_CHIPOPTIONS_MSK;
-       chip->options |= (NAND_NO_READRDY |
-                       NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+       chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
 
        pr_info("ONFI flash detected\n");
        return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
                chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
 ident_done:
 
-       /*
-        * Set chip as a default. Board drivers can override it, if necessary.
-        */
-       chip->options |= NAND_NO_AUTOINCR;
-
        /* Try to identify manufacturer */
        for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
                if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
        if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
                chip->cmdfunc = nand_command_lp;
 
-       pr_info("NAND device: Manufacturer ID:"
-               " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
-               nand_manuf_ids[maf_idx].name,
-               chip->onfi_version ? chip->onfi_params.model : type->name);
+       pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+               " page size: %d, OOB size: %d\n",
+               *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+               chip->onfi_version ? chip->onfi_params.model : type->name,
+               mtd->writesize, mtd->oobsize);
 
        return type;
 }
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
                if (!chip->ecc.write_oob)
                        chip->ecc.write_oob = nand_write_oob_syndrome;
 
-               if (mtd->writesize >= chip->ecc.size)
+               if (mtd->writesize >= chip->ecc.size) {
+                       if (!chip->ecc.strength) {
+                               pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+                               BUG();
+                       }
                        break;
+               }
                pr_warn("%d byte HW ECC not possible on "
                           "%d byte page size, fallback to SW ECC\n",
                           chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
                        BUG();
                }
                chip->ecc.strength =
-                       chip->ecc.bytes*8 / fls(8*chip->ecc.size);
+                       chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
                break;
 
        case NAND_ECC_NONE:
@@ -3483,7 +3500,14 @@ int nand_scan_tail(struct mtd_info *mtd)
 
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
-       mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+       mtd->ecc_strength = chip->ecc.strength;
+       /*
+        * Initialize bitflip_threshold to its default prior to the scan_bbt()
+        * call. scan_bbt() might invoke mtd_read(), so bitflip_threshold must
+        * be properly set.
+        */
+       if (!mtd->bitflip_threshold)
+               mtd->bitflip_threshold = mtd->ecc_strength;
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index 20a112f591fe3f67347bb206e1aae480d5eeaac6..30d1319ff0657ce75b063023d9eee0c65d2f5873 100644 (file)
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
 
                buf += mtd->oobsize + mtd->writesize;
                len -= mtd->writesize;
+               offs += mtd->writesize;
        }
        return 0;
 }
index af4fe8ca7b5ef7fbdfb25bb2553d8af4f61f8d29..621b70b7a159099ffc3060bdf2651a38dc2dae43 100644 (file)
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
         * These are the new chips with large page size. The pagesize and the
         * erasesize is determined from the extended id bytes
         */
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
        /* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
         * writes possible, but not implemented now
         */
        {"AND 128MiB 3,3V 8-bit",       0x01, 2048, 128, 0x4000,
-        NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
-        BBT_AUTO_REFRESH
-       },
+        NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
 
        {NULL,}
 };
index 261f478f8cc37944e3365ce8b0219ec073bfcddc..cf0cd3146817f9275706f1d4f61b53724d0d5dc1 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch,                "Enable BCH ecc and set how many bits should "
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR     0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -547,12 +546,6 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
-static uint64_t divide(uint64_t n, uint32_t d)
-{
-       do_div(n, d);
-       return n;
-}
-
 /*
  * Initialize the nandsim structure.
  *
@@ -581,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
@@ -594,7 +587,7 @@ static int init_nandsim(struct mtd_info *mtd)
                ns->options |= OPT_PAGE256;
        }
        else if (ns->geom.pgsz == 512) {
-               ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+               ns->options |= OPT_PAGE512;
                if (ns->busw == 8)
                        ns->options |= OPT_PAGE512_8BIT;
        } else if (ns->geom.pgsz == 2048) {
@@ -663,8 +656,6 @@ static int init_nandsim(struct mtd_info *mtd)
         for (i = 0; nand_flash_ids[i].name != NULL; i++) {
                 if (second_id_byte != nand_flash_ids[i].id)
                         continue;
-               if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
-                       ns->options |= OPT_AUTOINCR;
        }
 
        if (ns->busw == 16)
@@ -924,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
 
        if (!rptwear)
                return 0;
-       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       wear_eb_count = div_u64(mtd->size, mtd->erasesize);
        mem = wear_eb_count * sizeof(unsigned long);
        if (mem / sizeof(unsigned long) != wear_eb_count) {
                NS_ERR("Too many erase blocks for wear reporting\n");
@@ -1936,20 +1927,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
        if (ns->regs.count == ns->regs.num) {
                NS_DBG("read_byte: all bytes were read\n");
 
-               /*
-                * The OPT_AUTOINCR allows to read next consecutive pages without
-                * new read operation cycle.
-                */
-               if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-                       ns->regs.count = 0;
-                       if (ns->regs.row + 1 < ns->geom.pgnum)
-                               ns->regs.row += 1;
-                       NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
-                       do_state_action(ns, ACTION_CPY);
-               }
-               else if (NS_STATE(ns->nxstate) == STATE_READY)
+               if (NS_STATE(ns->nxstate) == STATE_READY)
                        switch_state(ns);
-
        }
 
        return outb;
@@ -2203,14 +2182,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
        ns->regs.count += len;
 
        if (ns->regs.count == ns->regs.num) {
-               if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-                       ns->regs.count = 0;
-                       if (ns->regs.row + 1 < ns->geom.pgnum)
-                               ns->regs.row += 1;
-                       NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
-                       do_state_action(ns, ACTION_CPY);
-               }
-               else if (NS_STATE(ns->nxstate) == STATE_READY)
+               if (NS_STATE(ns->nxstate) == STATE_READY)
                        switch_state(ns);
        }
 
index c2b0bba9d8b39607626f091d398a3fccfa8687c4..d7f681d0c9b98e54023cdce755b2febf6a983708 100644 (file)
 #include <linux/io.h>
 #include <linux/slab.h>
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+#include <linux/bch.h>
+#endif
+
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
        } iomode;
        u_char                          *buf;
        int                                     buf_len;
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+       struct bch_control             *bch;
+       struct nand_ecclayout           ecclayout;
+#endif
 };
 
 /**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
-               goto out_copy;
+               goto out_copy_unmap;
 
        init_completion(&info->comp);
 
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
 
+out_copy_unmap:
+       dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
 out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                        mtd);
        unsigned long timeo = jiffies;
-       int status = NAND_STATUS_FAIL, state = this->state;
+       int status, state = this->state;
 
        if (state == FL_ERASING)
                timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
                        break;
                cond_resched();
        }
+
+       status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
        return status;
 }
 
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
        return 1;
 }
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+
+/**
+ * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+       int nerrors;
+       unsigned int dev_width;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       struct nand_chip *chip = mtd->priv;
+
+       nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
+       dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+       /*
+        * Program GPMC to perform correction on one 512-byte sector at a time.
+        * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
+        * gives a slight (5%) performance gain (but requires additional code).
+        */
+       (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+}
+
+/**
+ * omap3_calculate_ecc_bch4 - Generate 7 ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_calculate_ecc_bch8 - Generate 13 ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_correct_data_bch - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
+                                 u_char *read_ecc, u_char *calc_ecc)
+{
+       int i, count;
+       /* cannot correct more than 8 errors */
+       unsigned int errloc[8];
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+
+       count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
+                          errloc);
+       if (count > 0) {
+               /* correct errors */
+               for (i = 0; i < count; i++) {
+                       /* correct data only, not ecc bytes */
+                       if (errloc[i] < 8*512)
+                               data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
+                       pr_debug("corrected bitflip %u\n", errloc[i]);
+               }
+       } else if (count < 0) {
+               pr_err("ecc unrecoverable error\n");
+       }
+       return count;
+}
+
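decode_bch() reports each error as a bit offset into the 512-byte data block (followed by the ECC bytes), which is why only locations below 8 * 512 are flipped back in the data buffer. A worked example of the arithmetic above, using a hypothetical error location:

	/* Hypothetical errloc[i] = 1035 (data area, since 1035 < 8 * 512 = 4096):
	 *   byte index = 1035 / 8 = 129
	 *   bit  index = 1035 & 7 = 3
	 *   data[129] ^= 1 << 3;     flips the corrupted bit back
	 * A location of, say, 4100 falls inside the ECC bytes themselves and is
	 * deliberately left untouched. */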
+/**
+ * omap3_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       if (info->bch) {
+               free_bch(info->bch);
+               info->bch = NULL;
+       }
+}
+
+/**
+ * omap3_init_bch - Initialize BCH ECC
+ * @mtd: MTD device structure
+ * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
+ */
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+       int ret, max_errors;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+#ifdef CONFIG_MTD_NAND_OMAP_BCH8
+       const int hw_errors = 8;
+#else
+       const int hw_errors = 4;
+#endif
+       info->bch = NULL;
+
+       max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+       if (max_errors != hw_errors) {
+               pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported\n",
+                      max_errors, hw_errors);
+               goto fail;
+       }
+
+       /* initialize GPMC BCH engine */
+       ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
+       if (ret)
+               goto fail;
+
+       /* software bch library is only used to detect and locate errors */
+       info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
+       if (!info->bch)
+               goto fail;
+
+       info->nand.ecc.size    = 512;
+       info->nand.ecc.hwctl   = omap3_enable_hwecc_bch;
+       info->nand.ecc.correct = omap3_correct_data_bch;
+       info->nand.ecc.mode    = NAND_ECC_HW;
+
+       /*
+        * The number of corrected errors in an ecc block that will trigger
+        * block scrubbing defaults to the ecc strength (4 or 8).
+        * Set mtd->bitflip_threshold here to define a custom threshold.
+        */
+
+       if (max_errors == 8) {
+               info->nand.ecc.strength  = 8;
+               info->nand.ecc.bytes     = 13;
+               info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+       } else {
+               info->nand.ecc.strength  = 4;
+               info->nand.ecc.bytes     = 7;
+               info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+       }
+
+       pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+       return 0;
+fail:
+       omap3_free_bch(mtd);
+       return -1;
+}
+
+/**
+ * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
+ * @mtd: MTD device structure
+ */
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+       int i, steps;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       struct nand_ecclayout *layout = &info->ecclayout;
+
+       /* build oob layout */
+       steps = mtd->writesize/info->nand.ecc.size;
+       layout->eccbytes = steps*info->nand.ecc.bytes;
+
+       /* do not bother creating special oob layouts for small page devices */
+       if (mtd->oobsize < 64) {
+               pr_err("BCH ecc is not supported on small page devices\n");
+               goto fail;
+       }
+
+       /* reserve 2 bytes for bad block marker */
+       if (layout->eccbytes+2 > mtd->oobsize) {
+               pr_err("no oob layout available for oobsize %d eccbytes %u\n",
+                      mtd->oobsize, layout->eccbytes);
+               goto fail;
+       }
+
+       /* put ecc bytes at oob tail */
+       for (i = 0; i < layout->eccbytes; i++)
+               layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+       layout->oobfree[0].offset = 2;
+       layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+       info->nand.ecc.layout = layout;
+
+       if (!(info->nand.options & NAND_BUSWIDTH_16))
+               info->nand.badblock_pattern = &bb_descrip_flashbased;
+       return 0;
+fail:
+       omap3_free_bch(mtd);
+       return -1;
+}
+
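For a feel of the resulting layout, the arithmetic in omap3_init_bch_tail() works out as follows on a hypothetical 2048-byte-page, 64-byte-OOB chip using BCH8 (13 ECC bytes per 512-byte step):

	/* steps    = 2048 / 512   = 4
	 * eccbytes = 4 * 13       = 52
	 * eccpos[] = 64 - 52 + i  = OOB bytes 12 .. 63 (ECC packed at the tail)
	 * oobfree  = { .offset = 2, .length = 64 - 2 - 52 = 10 }
	 * OOB bytes 0-1 stay reserved for the bad block marker. */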
+#else
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+       pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
+       return -1;
+}
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+       return -1;
+}
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
 static int __devinit omap_nand_probe(struct platform_device *pdev)
 {
        struct omap_nand_info           *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                info->nand.ecc.hwctl            = omap_enable_hwecc;
                info->nand.ecc.correct          = omap_correct_data;
                info->nand.ecc.mode             = NAND_ECC_HW;
+       } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+                  (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+               err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
+               if (err) {
+                       err = -EINVAL;
+                       goto out_release_mem_region;
+               }
        }
 
        /* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                                        (offset + omap_oobinfo.eccbytes);
 
                info->nand.ecc.layout = &omap_oobinfo;
+       } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+                  (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+               /* build OOB layout for BCH ECC correction */
+               err = omap3_init_bch_tail(&info->mtd);
+               if (err) {
+                       err = -EINVAL;
+                       goto out_release_mem_region;
+               }
        }
 
        /* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                        mtd);
+       omap3_free_bch(&info->mtd);
 
        platform_set_drvdata(pdev, NULL);
        if (info->dma_ch != -1)
index 974dbf8251c928842fe528c15777e4cd12efdaa8..1440e51cedccc877108a3922046a4b56fbac40ca 100644 (file)
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
        chip->ecc.mode = NAND_ECC_SOFT;
 
        /* Enable the following for a flash based bad block table */
-       chip->options = NAND_NO_AUTOINCR;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
        /* Scan to find existence of the device */
index 6404e6e81b101ad12b405a1cb1d689273b30c447..1bcb520404228ba2d8fe9cec1d29d9a6c535eac7 100644 (file)
@@ -23,14 +23,18 @@ struct plat_nand_data {
        void __iomem            *io_base;
 };
 
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
 /*
  * Probe for the NAND device.
  */
 static int __devinit plat_nand_probe(struct platform_device *pdev)
 {
        struct platform_nand_data *pdata = pdev->dev.platform_data;
+       struct mtd_part_parser_data ppdata;
        struct plat_nand_data *data;
        struct resource *res;
+       const char **part_types;
        int err = 0;
 
        if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
        data->chip.select_chip = pdata->ctrl.select_chip;
        data->chip.write_buf = pdata->ctrl.write_buf;
        data->chip.read_buf = pdata->ctrl.read_buf;
+       data->chip.read_byte = pdata->ctrl.read_byte;
        data->chip.chip_delay = pdata->chip.chip_delay;
        data->chip.options |= pdata->chip.options;
        data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
                goto out;
        }
 
-       err = mtd_device_parse_register(&data->mtd,
-                                       pdata->chip.part_probe_types, NULL,
+       part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+       ppdata.of_node = pdev->dev.of_node;
+       err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
                                        pdata->chip.partitions,
                                        pdata->chip.nr_partitions);
 
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id plat_nand_match[] = {
+       { .compatible = "gen_nand" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
 static struct platform_driver plat_nand_driver = {
-       .probe          = plat_nand_probe,
-       .remove         = __devexit_p(plat_nand_remove),
-       .driver         = {
-               .name   = "gen_nand",
-               .owner  = THIS_MODULE,
+       .probe  = plat_nand_probe,
+       .remove = __devexit_p(plat_nand_remove),
+       .driver = {
+               .name           = "gen_nand",
+               .owner          = THIS_MODULE,
+               .of_match_table = plat_nand_match,
        },
 };
 
index def50caa6f84b259fd5ed23e702efcd5977317cb..252aaefcacfa2ba0e07d1425672afcaa8c665bdf 100644 (file)
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 }
 
 static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
-               struct nand_chip *chip, const uint8_t *buf)
+               struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
        chip->write_buf(mtd, buf, mtd->writesize);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
-               struct nand_chip *chip, uint8_t *buf, int page)
+               struct nand_chip *chip, uint8_t *buf, int oob_required,
+               int page)
 {
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
        chip->ecc.size = host->page_size;
        chip->ecc.strength = 1;
 
-       chip->options = NAND_NO_AUTOINCR;
        chip->options |= NAND_NO_READRDY;
        if (host->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;
index c2040187c813e0084e6dc7da8441c675bbe6cc3e..8cb627751c9c9658c5a3d6ac7c0a9cf1f78fb6af 100644 (file)
@@ -539,14 +539,11 @@ exit:
  * nand_read_oob_syndrome assumes we can send column address - we can't
  */
 static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                            int page, int sndcmd)
+                            int page)
 {
-       if (sndcmd) {
-               chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-               sndcmd = 0;
-       }
+       chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return sndcmd;
+       return 0;
 }
 
 /*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
        .driver.pm      = &r852_pm_ops,
 };
 
-static __init int r852_module_init(void)
-{
-       return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
-       pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
index e9b2b260de3ae081bbeb2e04bbff4d61902003b3..aa9b8a5e0b8f94b66f6356b4346000feeea0e6d8 100644 (file)
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 }
 
 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                if (flctl->hwecc_cant_correct[i])
                        mtd->ecc_stats.failed++;
                else
-                       mtd->ecc_stats.corrected += 0;
+                       mtd->ecc_stats.corrected += 0; /* FIXME */
        }
 
        return 0;
 }
 
 static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                  const uint8_t *buf)
+                                  const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
        flctl->hwecc = pdata->has_hwecc;
        flctl->holden = pdata->use_holden;
 
-       nand->options = NAND_NO_AUTOINCR;
-
        /* Set address of hardware control function */
        /* 20 us command delay time */
        nand->chip_delay = 20;
index 774c3c26671379a30ef8c3f4e0aec1c57ee3efec..082bcdcd6bcfa3f460e26b68edc183b56aba5947 100644 (file)
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
        {NULL,}
 };
 
-#define XD_TYPEM       (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
 static struct nand_flash_dev nand_xd_flash_ids[] = {
 
        {"xD 16MiB 3,3V",    0x73, 512, 16, 0x4000, 0},
        {"xD 32MiB 3,3V",    0x75, 512, 32, 0x4000, 0},
        {"xD 64MiB 3,3V",    0x76, 512, 64, 0x4000, 0},
        {"xD 128MiB 3,3V",   0x79, 512, 128, 0x4000, 0},
-       {"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, XD_TYPEM},
-       {"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, XD_TYPEM},
-       {"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, XD_TYPEM},
-       {"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, XD_TYPEM},
+       {"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
+       {"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
+       {"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
+       {"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
        {NULL,}
 };
 
index b3ce12ef359e83777280aff2b6f9fa9d58cc4dc9..7153e0d27101e3eed06ac17addcdbd4734c7ccc3 100644 (file)
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+       return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+       return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
index 9f957c2d48e94fbd509c51e9f8d9f06f6fafe894..7c1380305219724bc0837cb6c1dc93320d4087c1 100644 (file)
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
  */
 int ubi_debugfs_init(void)
 {
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        dfs_rootdir = debugfs_create_dir("ubi", NULL);
        if (IS_ERR_OR_NULL(dfs_rootdir)) {
                int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
  */
 void ubi_debugfs_exit(void)
 {
-       debugfs_remove(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove(dfs_rootdir);
 }
 
 /* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
        struct dentry *dent;
        struct ubi_debug_info *d = ubi->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
                     ubi->ubi_num);
        if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
  */
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
-       debugfs_remove_recursive(ubi->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(ubi->dbg->dfs_dir);
 }
index 9df100a4ec3886b6f6b58893f3ed772235da055e..b6be644e7b85f5194c9f0e1303ffa4d2a51e9cf6 100644 (file)
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
        dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
               vol_id, lnum, ubi->works_count);
 
-       down_write(&ubi->work_sem);
        while (found) {
                struct ubi_work *wrk;
                found = 0;
 
+               down_read(&ubi->work_sem);
                spin_lock(&ubi->wl_lock);
                list_for_each_entry(wrk, &ubi->works, list) {
                        if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
                                spin_unlock(&ubi->wl_lock);
 
                                err = wrk->func(ubi, wrk, 0);
-                               if (err)
-                                       goto out;
+                               if (err) {
+                                       up_read(&ubi->work_sem);
+                                       return err;
+                               }
+
                                spin_lock(&ubi->wl_lock);
                                found = 1;
                                break;
                        }
                }
                spin_unlock(&ubi->wl_lock);
+               up_read(&ubi->work_sem);
        }
 
-out:
+       /*
+        * Make sure all the work items that were processed in parallel have
+        * finished.
+        */
+       down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);
+
        return err;
 }
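The write-lock/unlock pair added at the end is the usual rw-semaphore barrier: down_write() is only granted once every down_read() section taken per loop iteration above has been released, so by the time up_write() returns, all work items that were processed in parallel are known to have finished. A generic sketch of the idiom, with a hypothetical helper name:

	#include <linux/rwsem.h>

	/* Wait for every current reader of @sem to drop out, then continue. */
	static void wait_for_readers(struct rw_semaphore *sem)
	{
		down_write(sem);	/* blocks until all in-flight readers are gone */
		up_write(sem);		/* nothing is kept; the pair is purely a barrier */
	}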
 
index 3680aa251dea953d172e224a02b91b713f2dba22..2cf084eb9d524d995c4e3bf72a6d327e5f1ea9cb 100644 (file)
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index 2ee8cf9e8a3b9fe8e728e1bc6d2334793712deb6..2ee76993f052ce517b2f65e92b3dd16b89ebc758 100644 (file)
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
 
-       skb->queue_mapping = bond_queue_mapping(skb);
+       BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+                    sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+       skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -3226,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
        switch (event) {
        case NETDEV_CHANGENAME:
                return bond_event_changename(event_bond);
+       case NETDEV_UNREGISTER:
+               bond_remove_proc_entry(event_bond);
+               break;
+       case NETDEV_REGISTER:
+               bond_create_proc_entry(event_bond);
+               break;
        default:
                break;
        }
@@ -4171,7 +4178,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
        /*
         * Save the original txq to restore before passing to the driver
         */
-       bond_queue_mapping(skb) = skb->queue_mapping;
+       qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
@@ -4410,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
        bond_work_cancel_all(bond);
 
-       bond_remove_proc_entry(bond);
-
        bond_debug_unregister(bond);
 
        __hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
 
        bond_set_lockdep_class(bond_dev);
 
-       bond_create_proc_entry(bond);
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
        bond_prepare_sysfs_group(bond);
index ad284baafe87df64599c3335106a31aa0d6b3ec0..3cea38d373446826b7163974ed8e0b0836d19dc3 100644 (file)
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
        }
 }
 
+static const char *bond_slave_link_status(s8 link)
+{
+       static const char * const status[] = {
+               [BOND_LINK_UP] = "up",
+               [BOND_LINK_FAIL] = "going down",
+               [BOND_LINK_DOWN] = "down",
+               [BOND_LINK_BACK] = "going back",
+       };
+
+       return status[link];
+}
+
 static void bond_info_show_slave(struct seq_file *seq,
                                 const struct slave *slave)
 {
        struct bonding *bond = seq->private;
 
        seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-       seq_printf(seq, "MII Status: %s\n",
-                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
+       seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
        if (slave->speed == SPEED_UNKNOWN)
                seq_printf(seq, "Speed: %s\n", "Unknown");
        else
index aef42f045320ae86eed4014835f90ccd786d1586..485bedb8278c1cd7bc7f85322232caced433c3f6 100644 (file)
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
                }
        }
 
-       pr_info("%s: Unable to set %.*s as primary slave.\n",
-               bond->dev->name, (int)strlen(buf) - 1, buf);
+       strncpy(bond->params.primary, ifname, IFNAMSIZ);
+       bond->params.primary[IFNAMSIZ - 1] = 0;
+
+       pr_info("%s: Recording %s as primary, "
+               "but it has not been enslaved to %s yet.\n",
+               bond->dev->name, ifname, bond->dev->name);
 out:
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
index 1520814c77c7d1a4a94b0569069abb66cb356b51..4a27adb7ae67f7011ba1ae4bfae7784405435178 100644 (file)
@@ -693,8 +693,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
-                       /* Mark no embedded frame here */
-                       piggy_desc->offset = 0;
                        if (desc_pld_len == -EPROTO)
                                goto out_of_sync;
                }
@@ -737,6 +735,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
+                       /* Mark no embedded frame after extracting it */
+                       piggy_desc->offset = 0;
                }
        }
 
@@ -1178,6 +1178,7 @@ int cfhsi_probe(struct platform_device *pdev)
                dev_err(&ndev->dev, "%s: Registration error: %d.\n",
                        __func__, res);
                free_netdev(ndev);
+               return -ENODEV;
        }
        /* Add CAIF HSI device to list. */
        spin_lock(&cfhsi_list_lock);
index 536bda072a1677a18a396125f7df714c172ced54..86cd532c78f90f9e4f42e8e795f2b8f1423cf424 100644 (file)
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
        priv->write_reg(priv, &priv->regs->control,
                        CONTROL_ENABLE_AR);
 
-       if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-                                       CAN_CTRLMODE_LOOPBACK)) {
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
                priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
                                CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
        for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
                msg_obj_no = get_tx_echo_msg_obj(priv);
                val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-               if (!(val & (1 << msg_obj_no))) {
+               if (!(val & (1 << (msg_obj_no - 1)))) {
                        can_get_echo_skb(dev,
                                        msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
                        stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
                                        & IF_MCONT_DLC_MASK;
                        stats->tx_packets++;
                        c_can_inval_msg_object(dev, 0, msg_obj_no);
+               } else {
+                       break;
                }
        }
 
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       irqstatus = priv->irqstatus;
        if (!irqstatus)
                goto end;
 
@@ -1028,12 +1030,11 @@ end:
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-       u16 irqstatus;
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-       if (!irqstatus)
+       priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!priv->irqstatus)
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
                goto exit_irq_fail;
        }
 
+       napi_enable(&priv->napi);
+
        /* start the c_can controller */
        c_can_start(dev);
 
-       napi_enable(&priv->napi);
        netif_start_queue(dev);
 
        return 0;
index 9b7fbef3d09a1248cda69974ac9c3cc4cf9e464e..5f32d34af507e7a9d51c4b37f8add3cd6ff0ddcb 100644 (file)
@@ -76,6 +76,7 @@ struct c_can_priv {
        unsigned int tx_next;
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
+       u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
index 53115eee80758fd99e45a5a6258986fa81b9f8d5..688371cda37afc51ff125efa547e819126e4ca24 100644 (file)
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
        struct cc770_platform_data *pdata = pdev->dev.platform_data;
 
        priv->can.clock.freq = pdata->osc_freq;
-       if (priv->cpu_interface | CPUIF_DSC)
+       if (priv->cpu_interface & CPUIF_DSC)
                priv->can.clock.freq /= 2;
        priv->clkout = pdata->cor;
        priv->bus_config = pdata->bcr;
index 38c0690df5c8ae9ae283bde27eb523ca55bf8b19..81d474102378dddf0fee978f66c08c4634077d81 100644 (file)
@@ -939,12 +939,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl);
 
        if (pdev->dev.of_node) {
-               const u32 *clock_freq_p;
+               const __be32 *clock_freq_p;
 
                clock_freq_p = of_get_property(pdev->dev.of_node,
                                                "clock-frequency", NULL);
                if (clock_freq_p)
-                       clock_freq = *clock_freq_p;
+                       clock_freq = be32_to_cpup(clock_freq_p);
        }
 
        if (!clock_freq) {
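The switch to __be32 above matters because flattened-device-tree property values are stored big-endian; dereferencing the raw pointer only happened to give the right answer on big-endian CPUs. A minimal sketch of the endian-safe pattern (the device node comes from the surrounding probe context):

	#include <linux/of.h>

	const __be32 *prop;
	u32 clock_freq = 0;

	prop = of_get_property(pdev->dev.of_node, "clock-frequency", NULL);
	if (prop)
		clock_freq = be32_to_cpup(prop);	/* convert from DT big-endian */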
index 442d91a2747b9d8136dd5809ec3d192fc8c1e04c..bab0158f1cc3180f112c3d296cb5ffb22bddeb0d 100644 (file)
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
        rtnl_lock();
        err = __rtnl_link_register(&dummy_link_ops);
 
-       for (i = 0; i < numdummies && !err; i++)
+       for (i = 0; i < numdummies && !err; i++) {
                err = dummy_init_one();
+               cond_resched();
+       }
        if (err < 0)
                __rtnl_link_unregister(&dummy_link_ops);
        rtnl_unlock();
index 9cc15701101b8cfd98ae12799244089daec15c2e..1f78b63d5efe514d82472eaac9d1a3567c3a2981 100644 (file)
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
        if ((phy_data & BMSR_LSTATUS) == 0) {
                /* link down */
                netif_carrier_off(netdev);
-               netif_stop_queue(netdev);
                hw->hibernate = true;
                if (atl1c_reset_mac(hw) != 0)
                        if (netif_msg_hw(adapter))
index 46b8b7d81633eaa95fb4755a2225494a8b93856a..d09c6b583d17b2570dffdcabc0a4766722af9bf7 100644 (file)
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
-               skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+               skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);
 
-               bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;
 
index ac7b74488531be6f59ee885c23386b01eabd60f5..1fa4927a45b1dadbd6da8fa764792c57c3c67cf0 100644 (file)
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        int k, last;
 
                        if (skb == NULL) {
-                               j++;
+                               j = NEXT_TX_BD(j);
                                continue;
                        }
 
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        tx_buf->skb = NULL;
 
                        last = tx_buf->nr_frags;
-                       j++;
-                       for (k = 0; k < last; k++, j++) {
+                       j = NEXT_TX_BD(j);
+                       for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
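bnx2_free_tx_skbs() now advances the buffer-descriptor index with NEXT_TX_BD() instead of j++, so the walk skips the link descriptor reserved at the end of each BD page and stays aligned with entries that can actually hold an skb. A standalone sketch of page-aware index stepping (the page geometry is invented):

#include <stdio.h>

#define DESC_PER_PAGE 4	/* tiny page for the demo; last slot is a link descriptor */

static unsigned int next_bd(unsigned int idx)
{
	idx++;
	if (idx % DESC_PER_PAGE == DESC_PER_PAGE - 1)
		idx++;		/* skip the reserved link slot */
	return idx;
}

int main(void)
{
	unsigned int i = 0, n;

	for (n = 0; n < 8; n++) {
		printf("%u ", i);	/* prints: 0 1 2 4 5 6 8 9 */
		i = next_bd(i);
	}
	printf("\n");
	return 0;
}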
index e30e2a2f354c8fc30f2c59750c5b1ce63fa66676..7de824184979788b2f740b6e824c271c60ff45d9 100644 (file)
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-                       (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
                                (((le16_to_cpu(flags) & \
                                   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
index ad0743bf4bdece7ac3cbc17e98f881e92530b22f..8098eea9704df6ffea4b904ab173ce88ef6f1016 100644 (file)
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
-                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
                        netif_tx_wake_queue(txq);
 
                __netif_tx_unlock(txq);
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
        return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+                               struct bnx2x_fastpath *fp)
+{
+       /* Do nothing if no IP/L4 csum validation was done */
+
+       if (cqe->fast_path_cqe.status_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+               return;
+
+       /* If both IP/L4 validation were done, check if an error was found. */
+
+       if (cqe->fast_path_cqe.type_error_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+               fp->eth_q_stats.hw_csum_err++;
+       else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@ reuse_rx:
 
                skb_checksum_none_assert(skb);
 
-               if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM)
+                       bnx2x_csum_validate(skb, cqe, fp);
 
-                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       else
-                               fp->eth_q_stats.hw_csum_err++;
-               }
 
                skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txdata->tx_bd_prod += nbd;
 
-       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
                netif_tx_stop_queue(txq);
 
                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                smp_mb();
 
                fp->eth_q_stats.driver_xoff++;
-               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
                        netif_tx_wake_queue(txq);
        }
        txdata->tx_pkt++;
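In bnx2x_cmn.c the removed checksum macros are folded into bnx2x_csum_validate(): when the hardware skipped IP/L4 validation the skb keeps CHECKSUM_NONE, a validated packet with a bad-checksum flag bumps hw_csum_err, and only a cleanly validated packet is marked CHECKSUM_UNNECESSARY; the stop/wake thresholds also gain one extra descriptor of headroom. A standalone sketch of the three-way checksum decision (the flag values are invented):

#include <stdio.h>

#define NO_VALIDATION	0x1	/* hw skipped IP/L4 checksum validation */
#define BAD_XSUM	0x2	/* hw validated and found an error */

enum verdict { CSUM_NONE, CSUM_ERROR, CSUM_UNNECESSARY };

static enum verdict classify(unsigned int status, unsigned int error)
{
	if (status & NO_VALIDATION)
		return CSUM_NONE;	/* let the stack verify it */
	if (error & BAD_XSUM)
		return CSUM_ERROR;	/* would count a hw_csum_err */
	return CSUM_UNNECESSARY;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(NO_VALIDATION, 0),	/* 0: stack verifies */
	       classify(0, BAD_XSUM),		/* 1: error counted */
	       classify(0, 0));			/* 2: csum offloaded */
	return 0;
}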
index a3fb7215cd8910846606e8b9ddeaee7ba0c28b89..6e7d5c0843b4c4bd825b0cd505b489011fd748bd 100644 (file)
@@ -40,6 +40,7 @@
 #define I2C_BSC0                       0
 #define I2C_BSC1                       1
 #define I2C_WA_RETRY_CNT               3
+#define I2C_WA_PWR_ITER                        (I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP       1
 #define MCPR_IMC_COMMAND_WRITE_OP      2
 
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+                                       struct bnx2x_phy *phy,
+                                       u8 power)
+{
+       u32 pin_cfg;
+       struct bnx2x *bp = params->bp;
+
+       pin_cfg = (REG_RD(bp, params->shmem_base +
+                         offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+       if (pin_cfg == PIN_CFG_NA)
+               return;
+       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+                      power, pin_cfg);
+       /* Low ==> corresponding SFP+ module is powered
+        * high ==> the SFP+ module is powered down
+        */
+       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                                 struct link_params *params,
                                                 u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* 4 byte aligned address */
        addr32 = addr & (~0x3);
        do {
+               if (cnt == I2C_WA_PWR_ITER) {
+                       bnx2x_warpcore_power_module(params, phy, 0);
+                       /* Note that 100us are not enough here */
+                       usleep_range(1000,1000);
+                       bnx2x_warpcore_power_module(params, phy, 1);
+               }
                rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
                bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-                                       struct bnx2x_phy *phy,
-                                       u8 power)
-{
-       u32 pin_cfg;
-       struct bnx2x *bp = params->bp;
-
-       pin_cfg = (REG_RD(bp, params->shmem_base +
-                         offsetof(struct shmem_region,
-                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-       if (pin_cfg == PIN_CFG_NA)
-               return;
-       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-                      power, pin_cfg);
-       /* Low ==> corresponding SFP+ module is powered
-        * high ==> the SFP+ module is powered down
-        */
-       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        msleep(1);
 
-       if (!(CHIP_IS_E1(bp)))
+       if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
        else
                port = params->port;
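The SFP+ EEPROM read retry loop above power-cycles the module on its final attempt (I2C_WA_PWR_ITER == I2C_WA_RETRY_CNT - 1) before calling bnx2x_bsc_read() again, which is why bnx2x_warpcore_power_module() moves above its new caller. A standalone sketch of a retry loop that applies a recovery step on the last attempt:

#include <stdio.h>

#define RETRY_CNT	3
#define RECOVER_ITER	(RETRY_CNT - 1)

static int flaky_read(int attempt)
{
	return attempt < 2 ? -1 : 0;	/* pretend the third try succeeds */
}

int main(void)
{
	int rc, cnt = 0;

	do {
		if (cnt == RECOVER_ITER)
			printf("recovering before attempt %d\n", cnt);
		rc = flaky_read(cnt);
	} while (rc != 0 && ++cnt < RETRY_CNT);

	printf("rc=%d after %d attempt(s)\n", rc, cnt + 1);
	return 0;
}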
index c95e7b5e2b85589db86db943c3c77b697394312b..2c89d17cbb292cfc3e971dbf180b95728d8e06b7 100644 (file)
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
        }
 
        if (atomic_read(&ulp_ops->ref_count) != 0)
-               netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+               pr_warn("%s: Failed waiting for ref count to go to zero\n",
+                       __func__);
        return 0;
 
 out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
        uinfo = &udev->cnic_uinfo;
 
-       uinfo->mem[0].addr = dev->netdev->base_addr;
+       uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
        uinfo->mem[0].internal_addr = dev->regview;
-       uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
        uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+                                                    TX_MAX_TSS_RINGS + 1);
                uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
                                        PAGE_MASK;
                if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
                uinfo->name = "bnx2_cnic";
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
                uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
                        PAGE_MASK;
                uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
index edeeb516807a1399ceb8f8a9846bbd105fcce7c3..e47ff8be1d7b5c27be543c7d41f4584d56054644 100644 (file)
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
-       if (tg3_flag(tp, 5755_PLUS))
+       if (tg3_flag(tp, 5755_PLUS) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
index 8d06ea381741cca9178534c843838452872d1cf3..921c2082af4cccf58c808601126a63410aab08b4 100644 (file)
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        goto done;
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-                       dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-                               "permitted to execute this cmd (opcode %d)\n",
-                               opcode);
+                       dev_warn(&adapter->pdev->dev,
+                                "opcode %d-%d is not permitted\n",
+                                opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
-                       dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-                               "status %d, extd-status %d\n",
-                               opcode, compl_status, extd_status);
+                       dev_err(&adapter->pdev->dev,
+                               "opcode %d-%d failed:status %d-%d\n",
+                               opcode, subsystem, compl_status, extd_status);
                }
        }
 done:
index 9625bf420c161efb92ccc89c16b18b1fa4c90c68..b3f3fc3d132374207aa66fa6844d91374371c05b 100644 (file)
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
        u32 rsvd0[BE_TXP_SW_SZ];
        struct be_erx_stats_v1 erx;
        struct be_pmem_stats pmem;
-       u32 rsvd1[3];
+       u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
index 08efd308d78ae40640f73953a41531aabba2c054..501dfa9c88ec4cfad5ea5b642f6e78decbb8f731 100644 (file)
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
+               int gso_segs = skb_shinfo(skb)->gso_segs;
+
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
                be_txq_notify(adapter, txq->id, wrb_cnt);
 
-               be_tx_stats_update(txo, wrb_cnt, copied,
-                               skb_shinfo(skb)->gso_segs, stopped);
+               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
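be_xmit() now copies gso_segs out of the skb before ringing the doorbell with be_txq_notify(); once the hardware owns the packet the completion path may free the skb, so reading skb_shinfo() afterwards would be unsafe. A standalone sketch of the capture-before-handoff pattern:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int segs; };

static void hand_off(struct pkt *p)
{
	free(p);	/* after this point p must not be touched */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	int segs;

	if (!p)
		return 1;
	p->segs = 4;
	segs = p->segs;		/* capture first ... */
	hand_off(p);		/* ... then hand the buffer over */
	printf("segments sent: %d\n", segs);
	return 0;
}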
@@ -3236,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->flags |= IFF_MULTICAST;
 
-       netif_set_gso_max_size(netdev, 65535);
+       netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
        netdev->netdev_ops = &be_netdev_ops;
 
index 97f947b3d94af9c6b50811ca1b527b34307735c5..2933d08b036edc437b0e026ed55404fa34c8b4fb 100644 (file)
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
                length = status & BCOM_FEC_RX_BD_LEN_MASK;
                skb_put(rskb, length - 4);      /* length without CRC32 */
                rskb->protocol = eth_type_trans(rskb, dev);
-               if (!skb_defer_rx_timestamp(skb))
+               if (!skb_defer_rx_timestamp(rskb))
                        netif_rx(rskb);
 
                spin_lock(&priv->lock);
index 0741aded9eb057bbc2454a220bd6a19318e975fa..ab1d80ff0791c36f937b0c06c4a52c33df340591 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
@@ -2065,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               /* Steal sock reference for processing TX time stamps */
-               swap(skb_new->sk, skb->sk);
-               swap(skb_new->destructor, skb->destructor);
-               kfree_skb(skb);
+               if (skb->sk)
+                       skb_set_owner_w(skb_new, skb->sk);
+               consume_skb(skb);
                skb = skb_new;
        }
 
index 79b07ec6726f93f8eb59f07ef2c97d11b828ce95..0cafe4fe9406125af6375cd8859b1919ab0d62d4 100644 (file)
@@ -122,8 +122,10 @@ config IGB_DCA
 
 config IGB_PTP
        bool "PTP Hardware Clock (PHC)"
-       default y
-       depends on IGB && PTP_1588_CLOCK
+       default n
+       depends on IGB && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want to use PTP Hardware Clock (PHC) in the
          driver.  Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
 config IXGBE_PTP
        bool "PTP Clock Support"
        default n
-       depends on IXGBE && PTP_1588_CLOCK
+       depends on IXGBE && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want support for 1588 Timestamping with a
          PHC device, using the PTP 1588 Clock support. This is
index 95731c8410447f2db3fe8ad1d643c625b7a59b56..7483ca0a6282f8e2111c44ca67a0518c8edba661 100644 (file)
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                spin_lock_irqsave(&adapter->stats_lock,
                                                  irq_flags);
                                e1000_tbi_adjust_stats(hw, &adapter->stats,
-                                                      length, skb->data);
+                                                      length, mapped);
                                spin_unlock_irqrestore(&adapter->stats_lock,
                                                       irq_flags);
                                length--;
index 36db4df09aed6531cf66d893b40449a1b0cceb2c..1f063dcd8f85e39c15da5d82c935113af8044ffb 100644 (file)
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
+       /* SYNCH bit and IV bit are sticky */
+       udelay(10);
+       rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
index 351a4097b2baec09c53ce45cace4c03c1c8dcb47..76edbc1be33b4d1151e72c24c1f621c830c3a039 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index d863075df7a407cc59e194feebd652c622729384..905e2147d9182f4c8d9b26de66820fdc130debf0 100644 (file)
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
         * When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot change link characteristics when SoL/IDER is active.\n");
                return -EINVAL;
        }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
         * PHY loopback cannot be performed if SoL/IDER
         * sessions are active
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
                *data = 0;
                goto out;
index bbf70ba367da0c750997ff35f7db517a95eb74a2..e3a7b07df6294781559a72537d7949b711e82d00 100644 (file)
 #define I217_EEE_100_SUPPORTED  (1 << 1)       /* 100BaseTx EEE supported */
 
 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK                0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
 #define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_MASK                0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
 #define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_MASK                0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
 
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
  **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-       u16 phy_reg;
-       u32 phy_id;
+       u16 phy_reg = 0;
+       u32 phy_id = 0;
+       s32 ret_val;
+       u16 retry_count;
+
+       for (retry_count = 0; retry_count < 2; retry_count++) {
+               ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF))
+                       continue;
+               phy_id = (u32)(phy_reg << 16);
 
-       e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
-       phy_id = (u32)(phy_reg << 16);
-       e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
-       phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF)) {
+                       phy_id = 0;
+                       continue;
+               }
+               phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               break;
+       }
 
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        return true;
-       } else {
-               if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-                       hw->phy.id = phy_id;
+       } else if (phy_id) {
+               hw->phy.id = phy_id;
+               hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                return true;
        }
 
-       return false;
+       /*
+        * In case the PHY needs to be in mdio slow mode,
+        * set slow mode and try to get the PHY id again.
+        */
+       hw->phy.ops.release(hw);
+       ret_val = e1000_set_mdio_slow_mode_hv(hw);
+       if (!ret_val)
+               ret_val = e1000e_get_phy_id(hw);
+       hw->phy.ops.acquire(hw);
+
+       return !ret_val;
 }
 
 /**
@@ -4089,12 +4111,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                         * power good.
                         */
                        e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
-                       phy_reg |= I217_SxCTRL_MASK;
+                       phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
                        e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
 
                        /* Disable the SMB release on LCD reset. */
                        e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
-                       phy_reg &= ~I217_MEMPWR;
+                       phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
                }
 
@@ -4103,7 +4125,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                 * Support
                 */
                e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
-               phy_reg |= I217_CGFREG_MASK;
+               phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 
 release:
@@ -4176,7 +4198,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                        ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
                        if (ret_val)
                                goto release;
-                       phy_reg |= I217_MEMPWR_MASK;
+                       phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
                        /* Disable Proxy */
@@ -4186,7 +4208,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
                if (ret_val)
                        goto release;
-               phy_reg &= ~I217_CGFREG_MASK;
+               phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
                if (ret_val)
index 026e8b3ab52eee6be5ecadf5ee0de2745eae5faf..a13439928488c7aeab640d0a99f0833a5e74f203 100644 (file)
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
         * In the case of the phy reset being blocked, we already have a link.
         * We do not need to set it up again.
         */
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                return 0;
 
        /*
index a4b0435b00dc83078776346d9a510923b1cc20ac..623e30b9964de29e091f77564ccd53ce8ba30876 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,7 +6190,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                adapter->hw.phy.ms_type = e1000_ms_hw_default;
        }
 
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
        /* Set initial default active device features */
@@ -6404,7 +6357,7 @@ err_register:
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
 err_eeprom:
-       if (!hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
                e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
        kfree(adapter->tx_ring);
index 0334d013bc3c828fc2256ae117287809f46a9d2c..b860d4f7ea2a950a7b24d0db8ca6f15446f1bfd3 100644 (file)
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
        s32 ret_val;
        u32 ctrl;
 
-       ret_val = phy->ops.check_reset_block(hw);
-       if (ret_val)
-               return 0;
+       if (phy->ops.check_reset_block) {
+               ret_val = phy->ops.check_reset_block(hw);
+               if (ret_val)
+                       return 0;
+       }
 
        ret_val = phy->ops.acquire(hw);
        if (ret_val)
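The e1000e hunks wrap every call to hw->phy.ops.check_reset_block() in a NULL test because not all PHY ops tables provide that operation. A standalone sketch of invoking an optional function pointer:

#include <stdio.h>
#include <stddef.h>

struct phy_ops {
	int (*check_reset_block)(void *hw);	/* may be NULL */
};

static int reset_blocked(const struct phy_ops *ops, void *hw)
{
	return ops->check_reset_block && ops->check_reset_block(hw);
}

static int always_blocked(void *hw)
{
	(void)hw;
	return 1;
}

int main(void)
{
	struct phy_ops none = { .check_reset_block = NULL };
	struct phy_ops some = { .check_reset_block = always_blocked };

	printf("%d %d\n", reset_blocked(&none, NULL),	/* 0 */
			  reset_blocked(&some, NULL));	/* 1 */
	return 0;
}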
index e65083958421bf06bdd7d45a51b1f53128de2dec..5e84eaac48c191727d9ac733c80ffbb13ce1473f 100644 (file)
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
                break;
        case e1000_i350:
-       case e1000_i210:
-       case e1000_i211:
                mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
                break;
        default:
index 8ce67064b9c5802efe098f702486477b424c273e..90eef07943f4d50bbd027646e170abca5abfb1a3 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                * The user's desire is to turn off interrupt throttling
+                * altogether, but due to HW limitations, we can't do that.
+                * Instead we set a very small value in EITR, which would
+                * allow ~967k interrupts per second, but allow the adapter's
+                * internal clocking to still function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
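The rewritten igbvf_set_coalesce() programs values in [IGBVF_MIN_ITR_USECS, IGBVF_MAX_ITR_USECS] directly into the ITR (shifted into what appear to be 256 ns register units), maps 2 and 3 to the driver's start ITR, and maps 0 to the smallest usable EITR because the hardware cannot switch throttling off completely. A standalone sketch of the usecs-to-interrupt-rate arithmetic used above:

#include <stdio.h>

int main(void)
{
	unsigned int usecs = 100;		/* requested coalescing interval */
	unsigned int itr = usecs << 2;		/* register value, 256 ns per unit */
	unsigned int ints_per_sec = 1000000000u / (itr * 256u);

	printf("EITR=%u -> ~%u interrupts/s\n", itr, ints_per_sec);
	return 0;
}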
index 3ef3c5284e522af9f797aada6e721486cce5190a..7af291e236bf91b7317fd02ddac47ee6c7cf0bcf 100644 (file)
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-       __IXGBE_RX_FCOE_BUFSZ,
+       __IXGBE_RX_FCOE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
 #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
 }
 #else
 #define ixgbe_rx_pg_order(_ring) 0
index af1a5314b494f90d362d647ad5921d708a5fa87f..c377706e81a8f75d89f800c0b527109e4d66f335 100644 (file)
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((rxr_idx >= f->mask) &&
                            (rxr_idx < f->mask + f->indices))
-                               set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+                               set_bit(__IXGBE_RX_FCOE, &ring->state);
                }
 
 #endif /* IXGBE_FCOE */
index bf20457ea23aba4a249837aca419d4250a079fcf..e242104ab471a23ed6203dc4a4e4cda82edf44e7 100644 (file)
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
                                    union ixgbe_adv_rx_desc *rx_desc)
 {
        __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-       return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
               ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
                (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                             IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
        /* alloc new page for storage */
        if (likely(!page)) {
-               page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+               page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
                                   ixgbe_rx_pg_order(rx_ring));
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
 {
+       struct net_device *dev = rx_ring->netdev;
+
        ixgbe_update_rsc_stats(rx_ring, skb);
 
        ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 #endif
 
-       if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+       if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+           ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                __vlan_hwaccel_put_tag(skb, vid);
        }
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                skb->truesize -= ixgbe_rx_bufsz(rx_ring);
        }
 
+#ifdef IXGBE_FCOE
+       /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+       if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+               return false;
+
+#endif
        /* if skb_pad returns an error the skb was freed */
        if (unlikely(skb->len < 60)) {
                int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
-               if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+               if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-
-       /* Enable VLAN tag insert/strip */
-       adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 #ifdef IXGBE_FCOE
@@ -6642,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               e_err(drv, "Enable failed, SR-IOV enabled\n");
+               return -EINVAL;
+       }
+
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
            (hw->mac.type == ixgbe_mac_82598EB &&
@@ -6701,11 +6711,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_DCB
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
        /* return error if RXHASH is being enabled when RSS is not supported */
        if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
                features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6723,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
                features &= ~NETIF_F_LRO;
 
-
        return features;
 }
 
@@ -6766,6 +6770,11 @@ static int ixgbe_set_features(struct net_device *netdev,
                need_reset = true;
        }
 
+       if (features & NETIF_F_HW_VLAN_RX)
+               ixgbe_vlan_strip_enable(adapter);
+       else
+               ixgbe_vlan_strip_disable(adapter);
+
        if (changed & NETIF_F_RXALL)
                need_reset = true;
 
index ddc6a4d193028694f30c9835e5df76df09a91366..dcebd128becf96976c95797dfd2c32a6aeade5e9 100644 (file)
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 incval = 0;
+       u32 timinca = 0;
        u32 shift = 0;
        u32 cycle_speed;
        unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
                break;
        }
 
-       /* Bail if the cycle speed didn't change */
-       if (adapter->cycle_speed == cycle_speed)
+       /*
+        * grab the current TIMINCA value from the register so that it can be
+        * double checked. If the register value has been cleared, it must be
+        * reset to the correct value for generating a cyclecounter. If
+        * TIMINCA is zero, the SYSTIME registers do not increment at all.
+        */
+       timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+       /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+       if (adapter->cycle_speed == cycle_speed && timinca)
                return;
 
        /* disable the SDP clock out */
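ixgbe_ptp_start_cyclecounter() now re-reads TIMINCA and skips re-initialisation only when the link speed is unchanged and the register is still non-zero, because a cleared TIMINCA (e.g. after a reset) stops the SYSTIME registers from incrementing. A standalone sketch of that combined test (the register value is invented):

#include <stdio.h>

static int need_reinit(int old_speed, int new_speed, unsigned int timinca)
{
	/* re-init if the speed changed OR the increment register was cleared */
	return old_speed != new_speed || timinca == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       need_reinit(1000, 1000, 0x40000000),	/* 0: nothing to do */
	       need_reinit(1000, 100, 0x40000000),	/* 1: speed changed */
	       need_reinit(1000, 1000, 0));		/* 1: register cleared */
	return 0;
}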
index f69ec4288b104e6c175b54b17fe4ceecc363fba1..41e32257a4e873e37d4daf24798400c394416b69 100644 (file)
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
 
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+               return true;
+
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-               tx_ring->total_bytes = 0;
-               tx_ring->total_packets = 0;
                ixgbevf_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbevf_ring  *rx_ring;
        int r_idx;
-       int i;
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
-               rx_ring->total_bytes = 0;
-               rx_ring->total_packets = 0;
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
 
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
index 04d901d0ff635f284185175bdc3698bc7940038b..f0f06b2bc28b3f951933c19bde6e788adcc7c5bf 100644 (file)
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
        /*
         * Hardware-specific parameters.
         */
+#if defined(CONFIG_HAVE_CLK)
        struct clk *clk;
+#endif
        unsigned int t_clk;
 };
 
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->dev = dev;
 
        /*
-        * Get the clk rate, if there is one, otherwise use the default.
+        * Start with a default rate, and if there is a clock, allow
+        * it to override the default.
         */
+       mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
        mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
-       } else {
-               mp->t_clk = 133000000;
-               printk(KERN_WARNING "Unable to get clock");
        }
-
+#endif
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
                phy_detach(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
 
+#if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(mp->clk)) {
                clk_disable_unprepare(mp->clk);
                clk_put(mp->clk);
        }
+#endif
+
        free_netdev(mp->dev);
 
        platform_set_drvdata(pdev, NULL);
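The mv643xx_eth clk handling is compiled out when CONFIG_HAVE_CLK is not set, and t_clk now starts at the 133 MHz default so a clock, where one exists, merely overrides it. A standalone sketch of the default-then-override pattern (the probe function is a stand-in for clk_get()/clk_get_rate()):

#include <stdio.h>

static long optional_clock_rate(void)
{
	return -1;	/* pretend no clock is available on this platform */
}

int main(void)
{
	long rate = 133000000;			/* safe default */
	long probed = optional_clock_rate();

	if (probed > 0)
		rate = probed;			/* override only on success */
	printf("t_clk = %ld Hz\n", rate);
	return 0;
}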
index cace36f2ab921772417b515182762c54412f13fa..28a54451a3e5060344c91af03cc1d51d557bae59 100644 (file)
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
        struct sky2_port *sky2 = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
 
-       if (changed & NETIF_F_RXCSUM) {
-               bool on = features & NETIF_F_RXCSUM;
-               sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-                            on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+       if ((changed & NETIF_F_RXCSUM) &&
+           !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+               sky2_write32(sky2->hw,
+                            Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+                            (features & NETIF_F_RXCSUM)
+                            ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
 
        if (changed & NETIF_F_RXHASH)
index 1bcead1fa2f65f333ed16638148485a4c3891d54..842c8ce9494e0134ea02f7e962298737c11b19d2 100644 (file)
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = NULL
+               .wrapper = mlx4_QUERY_FW_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = NULL
+               .wrapper = mlx4_QUERY_DEV_CAP_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
index 988b2424e1c6085f0486569e79ca121b19c3fa44..69ba57270481c1dc9e5b5a0ea828c209ad38ffac 100644 (file)
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
        struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
        struct mlx4_en_priv *priv;
 
-       if (!mdev->pndev[port])
-               return;
-
-       priv = netdev_priv(mdev->pndev[port]);
        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
        case MLX4_DEV_EVENT_PORT_DOWN:
+               if (!mdev->pndev[port])
+                       return;
+               priv = netdev_priv(mdev->pndev[port]);
                /* To prevent races, we poll the link state in a separate
                  task rather than changing it here */
                priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
                break;
 
        default:
-               mlx4_warn(mdev, "Unhandled event: %d\n", event);
+               if (port < 1 || port > dev->caps.num_ports ||
+                   !mdev->pndev[port])
+                       return;
+               mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
        }
 }
 
index 926d8aac941c67c8e1160c120073b14aaddb778b..073b85b45fc5d8db3f4be778f32c085eea0b7790 100644 (file)
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
+
+       if (priv->base_tx_qpn) {
+               mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+               priv->base_tx_qpn = 0;
+       }
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
-       int base_tx_qpn, err;
+       int err;
 
-       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
        if (err) {
                en_err(priv, "failed reserving range for TX rings\n");
                return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
                                      prof->tx_ring_size, i, TX))
                        goto err;
 
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
        en_err(priv, "Failed to allocate NIC resources\n");
-       mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
        return -ENOMEM;
 }
 
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
        /* Configure port */
+       mlx4_en_calc_rx_buf(dev);
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   MLX4_EN_MIN_MTU,
-                                   0, 0, 0, 0);
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   prof->tx_pause, prof->tx_ppp,
+                                   prof->rx_pause, prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations "
                       "for port %d, with error %d\n", priv->port, err);
index 3b6f8efbf141278e8ad0958df64d7a852cf933fa..bce98d9c0039b87e74f8d7b7fbd9db3b0281d0bb 100644 (file)
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
                        mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
 
-                       if (flr_slave > dev->num_slaves) {
+                       if (flr_slave >= dev->num_slaves) {
                                mlx4_warn(dev,
                                          "Got FLR for unknown function: %d\n",
                                          flr_slave);
index 68f5cd6cb3c7c4291edd5f0972d586389ccd1fa1..9c83bb8151ea5be38395939f57cfa677b729717a 100644 (file)
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                for (i = 1; i <= dev_cap->num_ports; ++i) {
                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B,
-                                          !mlx4_is_slave(dev));
+                                          MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
                        if (err)
                                goto out;
 
@@ -669,6 +668,28 @@ out:
        return err;
 }
 
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int     err = 0;
+       u8      field;
+
+       err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+       if (err)
+               return err;
+
+       /* For guests, report Blueflame disabled */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+       field &= 0x7f;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+       return 0;
+}
+
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);
 
+       if (mlx4_is_slave(dev))
+               goto out;
+
        MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
        dev->caps.function = lg;
 
@@ -927,6 +951,27 @@ out:
        return err;
 }
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       u8 *outbuf;
+       int err;
+
+       outbuf = outbox->buf;
+       err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+       if (err)
+               return err;
+
+       /* for slaves, zero out everything except FW version */
+       outbuf[0] = outbuf[1] = 0;
+       memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+       return 0;
+}
+
 static void get_board_id(void *vsd, char *board_id)
 {
        int i;
index 2e024a68fa814573d858ac382147f1f4d923b835..a0313de122de2338684035d51f29bbc5a8ae4e34 100644 (file)
@@ -142,12 +142,6 @@ struct mlx4_port_config {
        struct pci_dev *pdev;
 };
 
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
-       return dev->caps.reserved_eqs +
-               MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        }
 
        dev->caps.num_ports          = dev_cap->num_ports;
+       dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
                dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
 
        memset(&dev_cap, 0, sizeof(dev_cap));
+       dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }
 
+       err = mlx4_QUERY_FW(dev);
+       if (err)
+               mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.num_mgms              = 0;
        dev->caps.num_amgms             = 0;
 
-       for (i = 1; i <= dev->caps.num_ports; ++i)
-               dev->caps.port_mask[i] = dev->caps.port_type[i];
-
        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
+
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
                                       pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                return -ENODEV;
        }
 
-#if 0
-       mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
-       mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
-                 dev->caps.num_uars, dev->caps.reserved_uars,
-                 dev->caps.uar_page_size * dev->caps.num_uars,
-                 pci_resource_len(dev->pdev, 2));
-       mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
-                 dev->caps.reserved_eqs);
-       mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
-                 dev->caps.num_pds, dev->caps.reserved_pds,
-                 dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
        return 0;
 }
 
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
        if (err)
                goto err_srq;
 
-       num_eqs = (mlx4_is_master(dev)) ?
-               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-               dev->caps.num_eqs;
+       num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+                 dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
        }
 
 
-       num_eqs = (mlx4_is_master(dev)) ?
-               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-               dev->caps.num_eqs;
+       num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+                  dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  num_eqs, num_eqs, 0, 0);
@@ -1989,6 +1975,8 @@ slave_start:
        if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
+               dev->caps.num_comp_vectors = 1;
+               dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }
index 86b6e5a2fabf93c7494e013dc383f5c32c650bf1..e5d20220762cf6437078062676607d0d0423779c 100644 (file)
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type);
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
index 6ae350921b1afa460a85e42f8aa498e33881a26b..225c20d47900b488770298ffa34a6defe6d06844 100644 (file)
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
        int vids[128];
        bool wol;
        struct device *ddev;
+       int base_tx_qpn;
 
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
index 1fe2c7a8b40c44883fe3eb4d8def444686e64c06..a8fb52992c6403618806d6bcc1744f5c2c22ed0e 100644 (file)
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        if (slave != dev->caps.function)
                memset(inbox->buf, 0, 256);
        if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
-               *(u8 *) inbox->buf         = !!reset_qkey_viols << 6;
+               *(u8 *) inbox->buf         |= !!reset_qkey_viols << 6;
                ((__be32 *) inbox->buf)[2] = agg_cap_mask;
        } else {
-               ((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+               ((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
                ((__be32 *) inbox->buf)[1] = agg_cap_mask;
        }
 
index 06e5adeb76f71840d7a34f2221c607dfcefcb723..b83bc928d52a9fa1bfc2f78957b5699979af6db5 100644 (file)
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_AUXC].num    = request->num_qp;
        profile[MLX4_RES_SRQ].num     = request->num_srq;
        profile[MLX4_RES_CQ].num      = request->num_cq;
-       profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+       profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
+                                       dev->phys_caps.num_phys_eqs :
+                                       min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
        profile[MLX4_RES_DMPT].num    = request->num_mpt;
        profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
        profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        init_hca->log_num_cqs = profile[i].log_num;
                        break;
                case MLX4_RES_EQ:
-                       dev->caps.num_eqs     = profile[i].num;
+                       dev->caps.num_eqs     = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+                                                                        MAX_MSIX));
                        init_hca->eqc_base    = profile[i].start;
-                       init_hca->log_num_eqs = profile[i].log_num;
+                       init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
                        break;
                case MLX4_RES_DMPT:
                        dev->caps.num_mpts      = profile[i].num;
index 8d2666fcffd7eea6e2d913497be0f7ab080439c3..083d6715335cdb76200b905af3ee3ff4f25495f7 100644 (file)
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
                        /* Update stats */
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
-
-                       /* Free buffer */
-                       dev_kfree_skb_irq(skb);
                }
+               dev_kfree_skb_irq(skb);
 
                txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
        }
 
-       if (netif_queue_stopped(ndev))
-               netif_wake_queue(ndev);
+       if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+               if (netif_queue_stopped(ndev))
+                       netif_wake_queue(ndev);
+       }
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
        .ndo_set_rx_mode        = lpc_eth_set_multicast_list,
        .ndo_do_ioctl           = lpc_eth_ioctl,
        .ndo_set_mac_address    = lpc_set_mac_address,
+       .ndo_change_mtu         = eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
index 46e77a2c51219223909f40d6fb223c0981d37a26..ad98f4d7919deaa73154810ceedf24949e98beeb 100644 (file)
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                pfn = pci_info[i].id;
-               if (pfn > QLCNIC_MAX_PCI_FUNC) {
+               if (pfn >= QLCNIC_MAX_PCI_FUNC) {
                        ret = QL_STATUS_INVALID_PARAM;
                        goto err_eswitch;
                }
index 4de73643fec676396835c3c7582c287d8648b322..d1827e887f4e9d82ce8d46ee33cfb61052ee2816 100644 (file)
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses"
                                "not supported by the card\n");
-               goto err_out;
+               goto err_out_disable_dev;
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses"
                                "not supported by the card\n");
-               goto err_out;
+               goto err_out_disable_dev;
        }
 
        /* IO Size check */
        if (pci_resource_len(pdev, bar) < io_size) {
                dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
                err = -EIO;
-               goto err_out;
+               goto err_out_disable_dev;
        }
 
        pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        dev = alloc_etherdev(sizeof(struct r6040_private));
        if (!dev) {
                err = -ENOMEM;
-               goto err_out;
+               goto err_out_disable_dev;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
        lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
 err_out_mdio:
        mdiobus_free(lp->mii_bus);
 err_out_unmap:
+       netif_napi_del(&lp->napi);
+       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
 err_out_free_dev:
        free_netdev(dev);
+err_out_disable_dev:
+       pci_disable_device(pdev);
 err_out:
        return err;
 }
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
        mdiobus_unregister(lp->mii_bus);
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
+       netif_napi_del(&lp->napi);
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, lp->base);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
index 5eef290997f91414c7c73fc143c45518e3f7c8cd..995d0cfc4c065658ad47279a3705beb27b8c43f0 100644 (file)
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
+       cpw32_f(HiTxRingAddr, 0);
+       cpw32_f(HiTxRingAddr + 4, 0);
+
+       ring_dma = cp->ring_dma;
+       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
 
        cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-       cpw32_f(HiTxRingAddr, 0);
-       cpw32_f(HiTxRingAddr + 4, 0);
-
-       ring_dma = cp->ring_dma;
-       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
        cpw16(MultiIntr, 0);
 
        cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
 
 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-       writeb (~EE_CS, ee_addr);
+       writeb(0, ee_addr);
        eeprom_delay ();
 }
 
index 03df076ed596086abbfd1dde5383badc4614ab8e..1d83565cc6af6dca0e61360c1c60394e1fb5682d 100644 (file)
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
        }
 
        /* Terminate the EEPROM access. */
-       RTL_W8 (Cfg9346, ~EE_CS);
+       RTL_W8(Cfg9346, 0);
        eeprom_delay ();
 
        return retval;
index 00b4f56a671cac02790a2519b3f978b4c891457d..d7a04e0911012649f48b85bbf5521ee2dc05ddbe 100644 (file)
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        default:
@@ -5889,11 +5890,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
        if (status & LinkChg)
                __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-       napi_disable(&tp->napi);
-       rtl_irq_disable(tp);
-
-       napi_enable(&tp->napi);
-       napi_schedule(&tp->napi);
+       rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
@@ -6345,6 +6342,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
 
        cancel_work_sync(&tp->wk.work);
 
+       netif_napi_del(&tp->napi);
+
        unregister_netdev(dev);
 
        rtl_release_firmware(tp);
@@ -6668,6 +6667,7 @@ out:
        return rc;
 
 err_out_msi_4:
+       netif_napi_del(&tp->napi);
        rtl_disable_msi(pdev, tp);
        iounmap(ioaddr);
 err_out_free_res_3:
index be3c22179161504f39eb2527b85cfa44eb2da685..79bf09b419715773a33dca0d43161e911d0c818a 100644 (file)
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
@@ -1101,8 +1101,14 @@ static int sh_eth_rx(struct net_device *ndev)
 
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
-       if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+       if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+               /* fix the values for the next reception if RDE is set */
+               if (intr_status & EESR_RDE)
+                       mdp->cur_rx = mdp->dirty_rx =
+                               (sh_eth_read(ndev, RDFAR) -
+                                sh_eth_read(ndev, RDLAR)) >> 4;
                sh_eth_write(ndev, EDRRR_R, EDRRR);
+       }
 
        return 0;
 }
@@ -1199,8 +1205,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
 
-               if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
-                       sh_eth_write(ndev, EDRRR_R, EDRRR);
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
@@ -1271,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                        EESR_RTSF | /* short frame recv */
                        EESR_PRE  | /* PHY-LSI recv error */
                        EESR_CERF)){ /* recv frame CRC error */
-               sh_eth_rx(ndev);
+               sh_eth_rx(ndev, intr_status);
        }
 
        /* Tx Check */
index dab9c6f671ec69a4ced1f1ea72c83b171f0ad8b0..1466e5d2af44a438e2cf04205edf908b997c6966 100644 (file)
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        retval = smsc911x_request_resources(pdev);
        if (retval)
-               goto out_return_resources;
+               goto out_request_resources_fail;
 
        retval = smsc911x_enable_resources(pdev);
        if (retval)
-               goto out_disable_resources;
+               goto out_enable_resources_fail;
 
        if (pdata->ioaddr == NULL) {
                SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
        free_irq(dev->irq, dev);
 out_disable_resources:
        (void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
        smsc911x_free_resources(pdev);
+out_request_resources_fail:
        platform_set_drvdata(pdev, NULL);
        iounmap(pdata->ioaddr);
        free_netdev(dev);
index 036428348faa3e5b58cd261b9f0ec45e0ad3e4fc..9f448279e12a52ea7965c3500bd910c859afbb6b 100644 (file)
@@ -13,7 +13,7 @@ config STMMAC_ETH
 if STMMAC_ETH
 
 config STMMAC_PLATFORM
-       tristate "STMMAC platform bus support"
+       bool "STMMAC Platform bus support"
        depends on STMMAC_ETH
        default y
        ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
          If unsure, say N.
 
 config STMMAC_PCI
-       tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+       bool "STMMAC PCI bus support (EXPERIMENTAL)"
        depends on STMMAC_ETH && PCI && EXPERIMENTAL
        ---help---
          This is to select the Synopsys DWMAC available on PCI devices,
index fb8377da1687c2166d3ebda44f5a7fa24c6bd785..4b785e10f2ed7d0994d17df944f01907480a767e 100644 (file)
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
                                                csum);
-
+               wmb();
                entry = (++priv->cur_tx) % txsize;
                desc = priv->dma_tx + entry;
 
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                                            len, DMA_TO_DEVICE);
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+               wmb();
                priv->hw->desc->set_tx_owner(desc);
                priv->tx_skbuff[entry] = NULL;
        } else {
index 6b5d060ee9def7dd5fb5a8edac7037cc71bd869b..dc20c56efc9d6dcca84d8981d6648af10e5d66d3 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
        if (!IS_ERR(priv->stmmac_clk))
-               return clk_enable(priv->stmmac_clk);
+               return clk_prepare_enable(priv->stmmac_clk);
 
        return 0;
 }
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
        if (IS_ERR(priv->stmmac_clk))
                return;
 
-       clk_disable(priv->stmmac_clk);
+       clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
        return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+       int err;
+
+       err = platform_driver_register(&stmmac_pltfr_driver);
+       if (err)
+               pr_err("stmmac: failed to register the platform driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+       platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+       pr_debug("stmmac: do not register the platf driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+       int err;
+
+       err = pci_register_driver(&stmmac_pci_driver);
+       if (err)
+               pr_err("stmmac: failed to register the PCI driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+       pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+       pr_debug("stmmac: do not register the PCI driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
index 70966330f44eca825d9456f5cea6723328c5e71b..ea3003edde18671ac13fd887fda9abc5d8a018ce 100644 (file)
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 
 /**
  * stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
        if (priv->plat->enh_desc) {
@@ -1211,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
                wmb();
                priv->hw->desc->set_tx_owner(desc);
+               wmb();
        }
 
        /* Interrupt on completition only for the latest segment */
@@ -1226,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* To avoid raise condition */
        priv->hw->desc->set_tx_owner(first);
+       wmb();
 
        priv->cur_tx++;
 
@@ -1289,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                }
                wmb();
                priv->hw->desc->set_rx_owner(p + entry);
+               wmb();
        }
 }
 
@@ -1861,6 +1865,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 /**
  * stmmac_dvr_probe
  * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  */
@@ -2090,6 +2096,34 @@ int stmmac_restore(struct net_device *ndev)
 }
 #endif /* CONFIG_PM */
 
+/* The driver can be configured with PCI and/or platform bus support,
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+       int err_plt = 0;
+       int err_pci = 0;
+
+       err_plt = stmmac_register_platform();
+       err_pci = stmmac_register_pci();
+
+       if ((err_pci) && (err_plt)) {
+               pr_err("stmmac: driver registration failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+       stmmac_unregister_platform();
+       stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
index 58fab5303e9cb45daaf4c134900dbb4ea47b60d8..cf826e6b6aa1d21eee5703d1b7edbcaef7b8e1c2 100644 (file)
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
 
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
        .name = STMMAC_RESOURCE_NAME,
        .id_table = stmmac_id_table,
        .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
 #endif
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-       int ret;
-
-       ret = pci_register_driver(&stmmac_driver);
-       if (ret < 0)
-               pr_err("%s: ERROR: driver registration failed\n", __func__);
-
-       return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-       pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
index 3dd8f08038086896121e9e0031b7ea5af32778b1..680d2b8dfe27990852849744426e115b27e41e4f 100644 (file)
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
        .probe = stmmac_pltfr_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
                   },
 };
 
-module_platform_driver(stmmac_driver);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
index 703c8cce2a2cfae470546a3043fa69dc676ea84f..8c726b7004d32a3ab1c8ba3c677135e82b2e07dc 100644 (file)
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
        struct netdev_queue *txq;
-       unsigned int tx_bytes;
        u16 pkt_cnt, tmp;
        int cons, index;
        u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
        netif_printk(np, tx_done, KERN_DEBUG, np->dev,
                     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-       tx_bytes = 0;
-       tmp = pkt_cnt;
-       while (tmp--) {
-               tx_bytes += rp->tx_buffs[cons].skb->len;
+       while (pkt_cnt--)
                cons = release_tx_packet(np, rp, cons);
-       }
 
        rp->cons = cons;
        smp_mb();
 
-       netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
 out:
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
                        struct tx_ring_info *rp = &np->tx_rings[i];
 
                        niu_free_tx_ring_info(np, rp);
-                       netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
                }
                kfree(np->tx_rings);
                np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                prod = NEXT_TX(rp, prod);
        }
 
-       netdev_tx_sent_queue(txq, skb->len);
-
        if (prod < rp->prod)
                rp->wrap_bit ^= TX_RING_KICK_WRAP;
        rp->prod = prod;
index d614c374ed9d2ada5b6923e288528b8201a09807..3b5c4571b55e3c922a4b6e5a94a0a4a8adcb2fce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 2d9218f86bca7fdbb71e61515efc0974b28fa6ad..098b1c42b39368faef868e50fdbb3174a6ccf8d9 100644 (file)
@@ -7,6 +7,8 @@ config TILE_NET
        depends on TILE
        default y
        select CRC32
+       select TILE_GXIO_MPIPE if TILEGX
+       select HIGH_RES_TIMERS if TILEGX
        ---help---
          This is a standard Linux network device driver for the
          on-chip Tilera Gigabit Ethernet and XAUI interfaces.
index f634f142cab417b48e846de6a464032ee858733c..0ef9eefd32110560be6a51c141154aed71e758cc 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_TILE_NET) += tile_net.o
 ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
 else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
 endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644 (file)
index 0000000..83b4b38
--- /dev/null
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>      /* printk() */
+#include <linux/slab.h>        /* kmalloc() */
+#include <linux/errno.h>       /* error codes */
+#include <linux/types.h>       /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header.  We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here?  If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+       void *buf;
+       size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+       /* The "complete_count" when the completion will be complete. */
+       s64 when;
+       /* The buffer to be freed when the completion is complete. */
+       struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+       /* The completions. */
+       struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+       /* The number of completions used. */
+       unsigned long comp_next;
+       /* The number of completions freed. */
+       unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+       struct hrtimer timer;
+       struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+       /* The NAPI struct. */
+       struct napi_struct napi;
+       /* Packet queue. */
+       gxio_mpipe_iqueue_t iqueue;
+       /* Our cpu. */
+       int my_cpu;
+       /* True if iqueue is valid. */
+       bool has_iqueue;
+       /* NAPI flags. */
+       bool napi_added;
+       bool napi_enabled;
+       /* Number of small sk_buffs which must still be provided. */
+       unsigned int num_needed_small_buffers;
+       /* Number of large sk_buffs which must still be provided. */
+       unsigned int num_needed_large_buffers;
+       /* A timer for handling egress completions. */
+       struct hrtimer egress_timer;
+       /* True if "egress_timer" is scheduled. */
+       bool egress_timer_scheduled;
+       /* Comps for each egress channel. */
+       struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+       /* Transmit wake timer for each egress channel. */
+       struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+       /* The "equeue". */
+       gxio_mpipe_equeue_t *equeue;
+       /* The headers for TSO. */
+       unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+       /* Our network device. */
+       struct net_device *dev;
+       /* The primary link. */
+       gxio_mpipe_link_t link;
+       /* The primary channel, if open, else -1. */
+       int channel;
+       /* The "loopify" egress link, if needed. */
+       gxio_mpipe_link_t loopify_link;
+       /* The "loopify" egress channel, if open, else -1. */
+       int loopify_channel;
+       /* The egress channel (channel or loopify_channel). */
+       int echannel;
+       /* Total stats. */
+       struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+       char buf[1024];
+       int rc;
+
+       if (network_cpus_string == NULL)
+               return false;
+
+       rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+       if (rc != 0) {
+               pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       /* Remove dedicated cpus. */
+       cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+       if (cpumask_empty(&network_cpus_map)) {
+               pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+       pr_info("Linux network CPUs: %s\n", buf);
+       return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress.  This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+       BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+       atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+       int stack = small ? small_buffer_stack : large_buffer_stack;
+       const unsigned long buffer_alignment = 128;
+       struct sk_buff *skb;
+       int len;
+
+       len = sizeof(struct sk_buff **) + buffer_alignment;
+       len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+       skb = dev_alloc_skb(len);
+       if (skb == NULL)
+               return false;
+
+       /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+       skb_reserve(skb, sizeof(struct sk_buff **));
+       skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+       /* Save a back-pointer to 'skb'. */
+       *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+       /* Make sure "skb" and the back-pointer have been flushed. */
+       wmb();
+
+       gxio_mpipe_push_buffer(&context, stack,
+                              (void *)va_to_tile_io_addr(skb->data));
+
+       return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+       /* Acquire the associated "skb". */
+       struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+       struct sk_buff *skb = *skb_ptr;
+
+       /* Paranoia. */
+       if (skb->data != va) {
+               /* Panic here since there's a reasonable chance
+                * that corrupt buffers mean generic memory
+                * corruption, with unpredictable system effects.
+                */
+               panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+                     va, skb, skb->data);
+       }
+
+       return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+       for (;;) {
+               tile_io_addr_t addr =
+                       (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+               if (addr == 0)
+                       break;
+               dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+       }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       while (info->num_needed_small_buffers != 0) {
+               if (!tile_net_provide_buffer(true))
+                       goto oops;
+               info->num_needed_small_buffers--;
+       }
+
+       while (info->num_needed_large_buffers != 0) {
+               if (!tile_net_provide_buffer(false))
+                       goto oops;
+               info->num_needed_large_buffers--;
+       }
+
+       return;
+
+oops:
+       /* Add a description to the page allocation failure dump. */
+       pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+       /* Filter packets received before we're up. */
+       if (dev == NULL || !(dev->flags & IFF_UP))
+               return true;
+
+       /* Filter out packets that aren't for us. */
+       if (!(dev->flags & IFF_PROMISC) &&
+           !is_multicast_ether_addr(buf) &&
+           compare_ether_addr(dev->dev_addr, buf) != 0)
+               return true;
+
+       return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+                                gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       /* Encode the actual packet length. */
+       skb_put(skb, len);
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       /* Acknowledge "good" hardware checksums. */
+       if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       netif_receive_skb(skb);
+
+       /* Update stats. */
+       tile_net_stats_add(1, &priv->stats.rx_packets);
+       tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+       /* Need a new buffer. */
+       if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+               info->num_needed_small_buffers++;
+       else
+               info->num_needed_large_buffers++;
+}
+
+/* Handle a packet.  Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+       uint8_t l2_offset;
+       void *va;
+       void *buf;
+       unsigned long len;
+       bool filter;
+
+       /* Drop packets for which no buffer was available.
+        * NOTE: This happens under heavy load.
+        */
+       if (idesc->be) {
+               struct tile_net_priv *priv = netdev_priv(dev);
+               tile_net_stats_add(1, &priv->stats.rx_dropped);
+               gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+               if (net_ratelimit())
+                       pr_info("Dropping packet (insufficient buffers).\n");
+               return false;
+       }
+
+       /* Get the "l2_offset", if allowed. */
+       l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+       /* Get the raw buffer VA (includes "headroom"). */
+       va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+       /* Get the actual packet start/length. */
+       buf = va + l2_offset;
+       len = idesc->l2_size - l2_offset;
+
+       /* Point "va" at the raw buffer. */
+       va -= NET_IP_ALIGN;
+
+       filter = filter_packet(dev, buf);
+       if (filter) {
+               gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+       } else {
+               struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+               /* Skip headroom, and any custom header. */
+               skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+               tile_net_receive_skb(dev, skb, idesc, len);
+       }
+
+       gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+       return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned int work = 0;
+       gxio_mpipe_idesc_t *idesc;
+       int i, n;
+
+       /* Process packets. */
+       while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+               for (i = 0; i < n; i++) {
+                       if (i == TILE_NET_BATCH)
+                               goto done;
+                       if (tile_net_handle_packet(idesc + i)) {
+                               if (++work >= budget)
+                                       goto done;
+                       }
+               }
+       }
+
+       /* There are no packets left. */
+       napi_complete(&info->napi);
+
+       /* Re-enable hypervisor interrupts. */
+       gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+       /* HACK: Avoid the "rotting packet" problem. */
+       if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+               napi_schedule(&info->napi);
+
+       /* ISSUE: Handle completions? */
+
+done:
+       tile_net_provide_needed_buffers();
+
+       return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       napi_schedule(&info->napi);
+       return IRQ_HANDLED;
+}
+
+/* Free some completions.  This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+                               struct tile_net_comps *comps,
+                               int limit, bool force_update)
+{
+       int n = 0;
+       while (comps->comp_last < comps->comp_next) {
+               unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+               struct tile_net_comp *comp = &comps->comp_queue[cid];
+               if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+                                                  force_update || n == 0))
+                       break;
+               dev_kfree_skb_irq(comp->skb);
+               comps->comp_last++;
+               if (++n == limit)
+                       break;
+       }
+       return n;
+}
+
+/* Add a completion.  This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+                    struct tile_net_comps *comps,
+                    uint64_t when, struct sk_buff *skb)
+{
+       int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+       comps->comp_queue[cid].when = when;
+       comps->comp_queue[cid].skb = skb;
+       comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       hrtimer_start(&info->tx_wake[priv->echannel].timer,
+                     ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+                     HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+       struct tile_net_tx_wake *tx_wake =
+               container_of(t, struct tile_net_tx_wake, timer);
+       netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+       return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       if (!info->egress_timer_scheduled) {
+               hrtimer_start(&info->egress_timer,
+                             ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+                             HRTIMER_MODE_REL_PINNED);
+               info->egress_timer_scheduled = true;
+       }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned long irqflags;
+       bool pending = false;
+       int i;
+
+       local_irq_save(irqflags);
+
+       /* The timer is no longer scheduled. */
+       info->egress_timer_scheduled = false;
+
+       /* Free all possible comps for this tile. */
+       for (i = 0; i < TILE_NET_CHANNELS; i++) {
+               struct tile_net_egress *egress = &egress_for_echannel[i];
+               struct tile_net_comps *comps = info->comps_for_echannel[i];
+               if (comps->comp_last >= comps->comp_next)
+                       continue;
+               tile_net_free_comps(egress->equeue, comps, -1, true);
+               pending = pending || (comps->comp_last < comps->comp_next);
+       }
+
+       /* Reschedule timer if needed. */
+       if (pending)
+               tile_net_schedule_egress_timer();
+
+       local_irq_restore(irqflags);
+
+       return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = arg;
+
+       if (!info->has_iqueue)
+               return;
+
+       if (dev != NULL) {
+               if (!info->napi_added) {
+                       netif_napi_add(dev, &info->napi,
+                                      tile_net_poll, TILE_NET_WEIGHT);
+                       info->napi_added = true;
+               }
+               if (!info->napi_enabled) {
+                       napi_enable(&info->napi);
+                       info->napi_enabled = true;
+               }
+               enable_percpu_irq(ingress_irq, 0);
+       } else {
+               disable_percpu_irq(ingress_irq);
+               if (info->napi_enabled) {
+                       napi_disable(&info->napi);
+                       info->napi_enabled = false;
+               }
+               /* FIXME: Drain the iqueue. */
+       }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+       static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
+       bool saw_channel = false;
+       int channel;
+       int rc;
+       int cpu;
+
+       gxio_mpipe_rules_init(&rules, &context);
+
+       for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+               if (tile_net_devs_for_channel[channel] == NULL)
+                       continue;
+               if (!saw_channel) {
+                       saw_channel = true;
+                       gxio_mpipe_rules_begin(&rules, first_bucket,
+                                              num_buckets, NULL);
+                       gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+               }
+               gxio_mpipe_rules_add_channel(&rules, channel);
+       }
+
+       /* NOTE: This can fail if there is no classifier.
+        * ISSUE: Can anything else cause it to fail?
+        */
+       rc = gxio_mpipe_rules_commit(&rules);
+       if (rc != 0) {
+               netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, tile_net_update_cpu,
+                                        (saw_channel ? dev : NULL), 1);
+
+       /* HACK: Allow packets to flow in the simulator. */
+       if (saw_channel)
+               sim_enable_mpipe_links(0, -1);
+
+       return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+       pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+       int rc;
+
+       /* Compute stack bytes; we round up to 64KB and then use
+        * alloc_pages() so we get the required 64KB alignment as well.
+        */
+       buffer_stack_size =
+               ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+                     64 * 1024);
+
+       /* Allocate two buffer stack indices. */
+       rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+                          rc);
+               return rc;
+       }
+       small_buffer_stack = rc;
+       large_buffer_stack = rc + 1;
+
+       /* Allocate the small memory stack. */
+       small_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (small_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+                                         BUFFER_SIZE_SMALL_ENUM,
+                                         small_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+                          rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_client_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       /* Allocate the large buffer stack. */
+       large_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (large_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+                                         BUFFER_SIZE_LARGE_ENUM,
+                                         large_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+                          rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_client_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       return 0;
+}
+
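Editorial aside, not part of the patch: the 64KB rounding in init_buffer_stacks() above is plain ALIGN arithmetic. A minimal user-space sketch, using a made-up raw size since gxio_mpipe_calc_buffer_stack_bytes() is hardware-specific:

/* Editorial sketch of the ALIGN-to-64KB computation (invented input). */
#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t raw = 100000;	/* pretend result of calc_buffer_stack_bytes() */
	size_t stack_bytes = ALIGN_UP(raw, 64 * 1024);

	/* 100000 rounds up to 131072, the 64KB multiple that is then
	 * requested from alloc_pages_exact().
	 */
	printf("%zu -> %zu\n", raw, stack_bytes);
	return 0;
}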
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+                                       int cpu, int ring)
+{
+       struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+       int order, i, rc;
+       struct page *page;
+       void *addr;
+
+       /* Allocate the "comps". */
+       order = get_order(COMPS_SIZE);
+       page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+       if (page == NULL) {
+               netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+                          COMPS_SIZE);
+               return -ENOMEM;
+       }
+       addr = pfn_to_kaddr(page_to_pfn(page));
+       memset(addr, 0, COMPS_SIZE);
+       for (i = 0; i < TILE_NET_CHANNELS; i++)
+               info->comps_for_echannel[i] =
+                       addr + i * sizeof(struct tile_net_comps);
+
+       /* If this is a network cpu, create an iqueue. */
+       if (cpu_isset(cpu, network_cpus_map)) {
+               order = get_order(NOTIF_RING_SIZE);
+               page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+               if (page == NULL) {
+                       netdev_err(dev,
+                                  "Failed to alloc %zd bytes iqueue memory\n",
+                                  NOTIF_RING_SIZE);
+                       return -ENOMEM;
+               }
+               addr = pfn_to_kaddr(page_to_pfn(page));
+               rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+                                           addr, NOTIF_RING_SIZE, 0);
+               if (rc < 0) {
+                       netdev_err(dev,
+                                  "gxio_mpipe_iqueue_init failed: %d\n", rc);
+                       return rc;
+               }
+               info->has_iqueue = true;
+       }
+
+       return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+                                       int ring, int network_cpus_count)
+{
+       int group, rc;
+
+       /* Allocate one NotifGroup. */
+       rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+                          rc);
+               return rc;
+       }
+       group = rc;
+
+       /* Initialize global num_buckets value. */
+       if (network_cpus_count > 4)
+               num_buckets = 256;
+       else if (network_cpus_count > 1)
+               num_buckets = 16;
+
+       /* Allocate some buckets, and set global first_bucket value. */
+       rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+               return rc;
+       }
+       first_bucket = rc;
+
+       /* Init group and buckets. */
+       rc = gxio_mpipe_init_notif_group_and_buckets(
+               &context, group, ring, network_cpus_count,
+               first_bucket, num_buckets,
+               GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+       if (rc != 0) {
+               netdev_err(
+                       dev,
+                       "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+                       rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores.  Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+       int cpu, rc;
+
+       rc = create_irq();
+       if (rc < 0) {
+               netdev_err(dev, "create_irq failed: %d\n", rc);
+               return rc;
+       }
+       ingress_irq = rc;
+       tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+       rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+                        0, NULL, NULL);
+       if (rc != 0) {
+               netdev_err(dev, "request_irq failed: %d\n", rc);
+               destroy_irq(ingress_irq);
+               ingress_irq = -1;
+               return rc;
+       }
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               if (info->has_iqueue) {
+                       gxio_mpipe_request_notif_ring_interrupt(
+                               &context, cpu_x(cpu), cpu_y(cpu),
+                               1, ingress_irq, info->iqueue.ring);
+               }
+       }
+
+       return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+       int cpu;
+
+       /* Do cleanups that require the mpipe context first. */
+       if (small_buffer_stack >= 0)
+               tile_net_pop_all_buffers(small_buffer_stack);
+       if (large_buffer_stack >= 0)
+               tile_net_pop_all_buffers(large_buffer_stack);
+
+       /* Destroy mpipe context so the hardware no longer owns any memory. */
+       gxio_mpipe_destroy(&context);
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               free_pages((unsigned long)(info->comps_for_echannel[0]),
+                          get_order(COMPS_SIZE));
+               info->comps_for_echannel[0] = NULL;
+               free_pages((unsigned long)(info->iqueue.idescs),
+                          get_order(NOTIF_RING_SIZE));
+               info->iqueue.idescs = NULL;
+       }
+
+       if (small_buffer_stack_va)
+               free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+       if (large_buffer_stack_va)
+               free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+       small_buffer_stack_va = NULL;
+       large_buffer_stack_va = NULL;
+       large_buffer_stack = -1;
+       small_buffer_stack = -1;
+       first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state.  If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+       int i, num_buffers, rc;
+       int cpu;
+       int first_ring, ring;
+       int network_cpus_count = cpus_weight(network_cpus_map);
+
+       if (!hash_default) {
+               netdev_err(dev, "Networking requires hash_default!\n");
+               return -EIO;
+       }
+
+       rc = gxio_mpipe_init(&context, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Set up the buffer stacks. */
+       num_buffers =
+               network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+       rc = init_buffer_stacks(dev, num_buffers);
+       if (rc != 0)
+               goto fail;
+
+       /* Provide initial buffers. */
+       rc = -ENOMEM;
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(true)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(false)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+
+       /* Allocate one NotifRing for each network cpu. */
+       rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+                          rc);
+               goto fail;
+       }
+
+       /* Init NotifRings per-cpu. */
+       first_ring = rc;
+       ring = first_ring;
+       for_each_online_cpu(cpu) {
+               rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+               if (rc < 0)
+                       goto fail;
+               ring = rc;
+       }
+
+       /* Initialize NotifGroup and buckets. */
+       rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+       if (rc != 0)
+               goto fail;
+
+       /* Create and enable interrupts. */
+       rc = tile_net_setup_interrupts(dev);
+       if (rc != 0)
+               goto fail;
+
+       return 0;
+
+fail:
+       tile_net_init_mpipe_fail();
+       return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+       struct page *headers_page, *edescs_page, *equeue_page;
+       gxio_mpipe_edesc_t *edescs;
+       gxio_mpipe_equeue_t *equeue;
+       unsigned char *headers;
+       int headers_order, edescs_order, equeue_order;
+       size_t edescs_size;
+       int edma;
+       int rc = -ENOMEM;
+
+       /* Only initialize once. */
+       if (egress_for_echannel[echannel].equeue != NULL)
+               return 0;
+
+       /* Allocate memory for the "headers". */
+       headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+       headers_page = alloc_pages(GFP_KERNEL, headers_order);
+       if (headers_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for TSO headers.\n",
+                           PAGE_SIZE << headers_order);
+               goto fail;
+       }
+       headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+       /* Allocate memory for the "edescs". */
+       edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+       edescs_order = get_order(edescs_size);
+       edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+       if (edescs_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for eDMA ring.\n",
+                           edescs_size);
+               goto fail_headers;
+       }
+       edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+       /* Allocate memory for the "equeue". */
+       equeue_order = get_order(sizeof(*equeue));
+       equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+       if (equeue_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for equeue info.\n",
+                           PAGE_SIZE << equeue_order);
+               goto fail_edescs;
+       }
+       equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+       /* Allocate an edma ring.  Note that in practice this can't
+        * fail, which is just as well, because the failure paths below
+        * cannot free the ring, so it would simply be leaked.
+        */
+       rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+                           rc);
+               goto fail_equeue;
+       }
+       edma = rc;
+
+       /* Initialize the equeue. */
+       rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+                                   edescs, edescs_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+               goto fail_equeue;
+       }
+
+       /* Done. */
+       egress_for_echannel[echannel].equeue = equeue;
+       egress_for_echannel[echannel].headers = headers;
+       return 0;
+
+fail_equeue:
+       __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+       __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+       __free_pages(headers_page, headers_order);
+
+fail:
+       return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+                             const char *link_name)
+{
+       int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+       if (rc < 0) {
+               netdev_err(dev, "Failed to open '%s'\n", link_name);
+               return rc;
+       }
+       rc = gxio_mpipe_link_channel(link);
+       if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+               netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+               gxio_mpipe_link_close(link);
+               return -EINVAL;
+       }
+       return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu, rc;
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+
+       /* Do one-time initialization the first time any device is opened. */
+       if (ingress_irq < 0) {
+               rc = tile_net_init_mpipe(dev);
+               if (rc != 0)
+                       goto fail;
+       }
+
+       /* Determine if this is the "loopify" device. */
+       if (unlikely((loopify_link_name != NULL) &&
+                    !strcmp(dev->name, loopify_link_name))) {
+               rc = tile_net_link_open(dev, &priv->link, "loop0");
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+               if (rc < 0)
+                       goto fail;
+               priv->loopify_channel = rc;
+               priv->echannel = rc;
+       } else {
+               rc = tile_net_link_open(dev, &priv->link, dev->name);
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               priv->echannel = rc;
+       }
+
+       /* Initialize egress info (if needed).  Once ever, per echannel. */
+       rc = tile_net_init_egress(dev, priv->echannel);
+       if (rc != 0)
+               goto fail;
+
+       tile_net_devs_for_channel[priv->channel] = dev;
+
+       rc = tile_net_update(dev);
+       if (rc != 0)
+               goto fail;
+
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize the transmit wake timer for this device for each cpu. */
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL);
+               tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+               tx_wake->dev = dev;
+       }
+
+       for_each_online_cpu(cpu)
+               netif_start_subqueue(dev, cpu);
+       netif_carrier_on(dev);
+       return 0;
+
+fail:
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               /* Clear the channel table entry while the index is valid. */
+               tile_net_devs_for_channel[priv->channel] = NULL;
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Don't return raw gxio error codes to generic Linux. */
+       return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_cancel(&tx_wake->timer);
+               netif_stop_subqueue(dev, cpu);
+       }
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+       tile_net_devs_for_channel[priv->channel] = NULL;
+       (void)tile_net_update(dev);
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+       unsigned long pfn = page_to_pfn(skb_frag_page(f));
+       return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+                                      struct tile_net_comps *comps,
+                                      gxio_mpipe_equeue_t *equeue,
+                                      int num_edescs)
+{
+       /* Try to acquire a completion entry. */
+       if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+           tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+               /* Try to acquire an egress slot. */
+               s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+
+               /* Freeing some completions gives the equeue time to drain. */
+               tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+               slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+       }
+
+       /* Still nothing; give up and stop the queue for a short while. */
+       netif_stop_subqueue(dev, smp_processor_id());
+       tile_net_schedule_tx_wake_timer(dev);
+       return -1;
+}
+
+/* Determine how many edesc's are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments.  This requires special care.
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int num_edescs = 0;
+       int segment;
+
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+
+               unsigned int p_used = 0;
+
+               /* One edesc for header and for each piece of the payload. */
+               for (num_edescs++; p_used < p_len; num_edescs++) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+               }
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       return num_edescs;
+}
+
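Editorial aside, not part of the patch: a worked example of tso_count_edescs() may help. With gso_size = 1000 and a 2500-byte paged payload split across fragments of 1500 and 1000 bytes (hence three segments), the loop counts one header edesc per segment plus one edesc per payload piece: (1+1) + (1+2) + (1+1) = 7. The same arithmetic as a standalone user-space sketch:

/* Editorial sketch: the edesc-counting arithmetic re-run with invented
 * sizes (gso_size 1000, 2500-byte payload over fragments of 1500 and 1000).
 */
#include <stdio.h>

int main(void)
{
	long frag_sizes[] = { 1500, 1000 };	/* assumed fragment layout */
	long p_len = 1000, data_len = 2500;	/* gso_size and paged payload */
	long f_id = -1, f_size = -1, f_used = -1, n;
	int num_edescs = 0, segment, gso_segs = 3;	/* ceil(2500 / 1000) */

	for (segment = 0; segment < gso_segs; segment++) {
		long p_used = 0;

		/* One edesc for the header, plus one per payload piece. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			while (f_used >= f_size) {
				f_id++;
				f_size = frag_sizes[f_id];
				f_used = 0;
			}
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be shorter than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	printf("edescs needed: %d\n", num_edescs);	/* prints 7 */
	return 0;
}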
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+                               s64 slot)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       struct iphdr *ih;
+       struct tcphdr *th;
+       unsigned int data_len = skb->data_len;
+       unsigned char *data = skb->data;
+       unsigned int ih_off, th_off, sh_len, p_len;
+       unsigned int isum_seed, tsum_seed, id, seq;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int segment;
+
+       /* Locate original headers and compute various lengths. */
+       ih = ip_hdr(skb);
+       th = tcp_hdr(skb);
+       ih_off = skb_network_offset(skb);
+       th_off = skb_transport_offset(skb);
+       sh_len = th_off + tcp_hdrlen(skb);
+       p_len = sh->gso_size;
+
+       /* Set up seed values for IP and TCP csum and initialize id and seq. */
+       isum_seed = ((0xFFFF - ih->check) +
+                    (0xFFFF - ih->tot_len) +
+                    (0xFFFF - ih->id));
+       tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+       id = ntohs(ih->id);
+       seq = ntohl(th->seq);
+
+       /* Prepare all the headers. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Copy to the header memory for this segment. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               memcpy(buf, data, sh_len);
+
+               /* Update copied ip header. */
+               ih = (struct iphdr *)(buf + ih_off);
+               ih->tot_len = htons(sh_len + p_len - ih_off);
+               ih->id = htons(id);
+               ih->check = csum_long(isum_seed + ih->tot_len +
+                                     ih->id) ^ 0xffff;
+
+               /* Update copied tcp header. */
+               th = (struct tcphdr *)(buf + th_off);
+               th->seq = htonl(seq);
+               th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+               if (segment != sh->gso_segs - 1) {
+                       th->fin = 0;
+                       th->psh = 0;
+               }
+
+               /* Skip past the header. */
+               slot++;
+
+               /* Skip past the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       slot++;
+               }
+
+               id++;
+               seq += p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Flush the headers so they are ready for hardware DMA. */
+       wmb();
+}
+
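Editorial aside, not part of the patch: the per-segment fix-ups above rely on ones-complement arithmetic. The seeds remove the original check/tot_len/id contributions (0xFFFF - x is the 16-bit complement of x), and csum_long() is then relied on to fold the adjusted sum back into 16 bits before it is inverted. A generic fold of that kind, as a sketch (csum_long() is a tile helper whose exact behaviour is assumed here):

/* Editorial sketch: fold a wide ones-complement sum down to 16 bits by
 * repeatedly adding the carries back in; illustrative stand-in only.
 */
#include <stdint.h>

static uint16_t csum_fold16(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

With such a fold, new_check = csum_fold16(isum_seed + new_tot_len + new_id) ^ 0xffff follows the incremental-update recipe of RFC 1624 and avoids recomputing the full header checksum for every segment.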
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+                      struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       gxio_mpipe_edesc_t edesc_head = { { 0 } };
+       gxio_mpipe_edesc_t edesc_body = { { 0 } };
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       unsigned int csum_start, sh_len;
+       int segment;
+
+       /* Prepare to egress the headers: set up header edesc. */
+       csum_start = skb_checksum_start_offset(skb);
+       sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       edesc_head.csum = 1;
+       edesc_head.csum_start = csum_start;
+       edesc_head.csum_dest = csum_start + skb->csum_offset;
+       edesc_head.xfer_size = sh_len;
+
+       /* This is only used to specify the TLB. */
+       edesc_head.stack_idx = large_buffer_stack;
+       edesc_body.stack_idx = large_buffer_stack;
+
+       /* Egress all the edescs. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               void *va;
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Egress the header. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               edesc_head.va = va_to_tile_io_addr(buf);
+               gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+               slot++;
+
+               /* Egress the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       /* Egress a piece of the payload. */
+                       edesc_body.va = va_to_tile_io_addr(va);
+                       edesc_body.xfer_size = n;
+                       edesc_body.bound = !(p_used < p_len);
+                       gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+                       slot++;
+               }
+
+               tx_packets++;
+               tx_bytes += sh_len + p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Update stats. */
+       tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+       tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance to around 7.5 Gbps on the 10G interfaces, although it
+ * also drops cpu utilization to under 8%.  Implementing "TSO" in the
+ * driver brings performance back up to line rate, while dropping cpu
+ * usage even further, to less than 4%.  In practice, profiling shows
+ * that skb_segment() causes most of the GSO overhead; the driver
+ * benefits from using preallocated memory to duplicate the TCP/IP
+ * headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int channel = priv->echannel;
+       struct tile_net_egress *egress = &egress_for_echannel[channel];
+       struct tile_net_comps *comps = info->comps_for_echannel[channel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       unsigned long irqflags;
+       int num_edescs;
+       s64 slot;
+
+       /* Determine how many mpipe edesc's are needed. */
+       num_edescs = tso_count_edescs(skb);
+
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Set up copies of header data properly. */
+       tso_headers_prepare(skb, egress->headers, slot);
+
+       /* Actually pass the data to the network hardware. */
+       tso_egress(dev, equeue, skb, egress->headers, slot);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+                                      struct sk_buff *skb,
+                                      void *b_data, unsigned int b_len)
+{
+       unsigned int i, n = 0;
+
+       struct skb_shared_info *sh = skb_shinfo(skb);
+
+       if (b_len != 0) {
+               frags[n].buf = b_data;
+               frags[n++].length = b_len;
+       }
+
+       for (i = 0; i < sh->nr_frags; i++) {
+               skb_frag_t *f = &sh->frags[i];
+               frags[n].buf = tile_net_frag_buf(f);
+               frags[n++].length = skb_frag_size(f);
+       }
+
+       return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       struct tile_net_comps *comps =
+               info->comps_for_echannel[priv->echannel];
+       unsigned int len = skb->len;
+       unsigned char *data = skb->data;
+       unsigned int num_edescs;
+       struct frag frags[MAX_FRAGS];
+       gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+       unsigned long irqflags;
+       gxio_mpipe_edesc_t edesc = { { 0 } };
+       unsigned int i;
+       s64 slot;
+
+       if (skb_is_gso(skb))
+               return tile_net_tx_tso(skb, dev);
+
+       num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+       /* This is only used to specify the TLB. */
+       edesc.stack_idx = large_buffer_stack;
+
+       /* Prepare the edescs. */
+       for (i = 0; i < num_edescs; i++) {
+               edesc.xfer_size = frags[i].length;
+               edesc.va = va_to_tile_io_addr(frags[i].buf);
+               edescs[i] = edesc;
+       }
+
+       /* Mark the final edesc. */
+       edescs[num_edescs - 1].bound = 1;
+
+       /* Add checksum info to the initial edesc, if needed. */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               unsigned int csum_start = skb_checksum_start_offset(skb);
+               edescs[0].csum = 1;
+               edescs[0].csum_start = csum_start;
+               edescs[0].csum_dest = csum_start + skb->csum_offset;
+       }
+
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       for (i = 0; i < num_edescs; i++)
+               gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot - 1, skb);
+
+       /* NOTE: Short packets are accounted as ETH_ZLEN bytes
+        * (e.g. a 42-byte packet counts as 60).
+        */
+       tile_net_stats_add(1, &priv->stats.tx_packets);
+       tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+                          &priv->stats.tx_bytes);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if ((new_mtu < 68) || (new_mtu > 1500))
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address.  However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+       disable_percpu_irq(ingress_irq);
+       tile_net_handle_ingress_irq(ingress_irq, NULL);
+       enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+       .ndo_open = tile_net_open,
+       .ndo_stop = tile_net_stop,
+       .ndo_start_xmit = tile_net_tx,
+       .ndo_select_queue = tile_net_select_queue,
+       .ndo_do_ioctl = tile_net_ioctl,
+       .ndo_get_stats = tile_net_get_stats,
+       .ndo_change_mtu = tile_net_change_mtu,
+       .ndo_tx_timeout = tile_net_tx_timeout,
+       .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+       dev->netdev_ops = &tile_net_ops;
+       dev->watchdog_timeo = TILE_NET_TIMEOUT;
+       dev->features |= NETIF_F_LLTX;
+       dev->features |= NETIF_F_HW_CSUM;
+       dev->features |= NETIF_F_SG;
+       dev->features |= NETIF_F_TSO;
+       dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+       int ret;
+       int i;
+       int nz_addr = 0;
+       struct net_device *dev;
+       struct tile_net_priv *priv;
+
+       /* HACK: Ignore "loop" links. */
+       if (strncmp(name, "loop", 4) == 0)
+               return;
+
+       /* Allocate the device structure.  Normally, "name" is a
+        * template, instantiated by register_netdev(), but not for us.
+        */
+       dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+                              NR_CPUS, 1);
+       if (!dev) {
+               pr_err("alloc_netdev_mqs(%s) failed\n", name);
+               return;
+       }
+
+       /* Initialize "priv". */
+       priv = netdev_priv(dev);
+       memset(priv, 0, sizeof(*priv));
+       priv->dev = dev;
+       priv->channel = -1;
+       priv->loopify_channel = -1;
+       priv->echannel = -1;
+
+       /* Get the MAC address and set it in the device struct; this must
+        * be done before the device is opened.  If the MAC is all zeroes,
+        * we use a random address, since we're probably on the simulator.
+        */
+       for (i = 0; i < 6; i++)
+               nz_addr |= mac[i];
+
+       if (nz_addr) {
+               memcpy(dev->dev_addr, mac, 6);
+               dev->addr_len = 6;
+       } else {
+               random_ether_addr(dev->dev_addr);
+       }
+
+       /* Register the network device. */
+       ret = register_netdev(dev);
+       if (ret) {
+               netdev_err(dev, "register_netdev failed %d\n", ret);
+               free_netdev(dev);
+               return;
+       }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       int my_cpu = smp_processor_id();
+
+       info->has_iqueue = false;
+
+       info->my_cpu = my_cpu;
+
+       /* Initialize the egress timer. */
+       hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+       int i;
+       char name[GXIO_MPIPE_LINK_NAME_LEN];
+       uint8_t mac[6];
+
+       pr_info("Tilera Network Driver\n");
+
+       mutex_init(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize each CPU. */
+       on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+       /* Find out what devices we have, and initialize them. */
+       for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+               tile_net_dev_init(name, mac);
+
+       if (!network_cpus_init())
+               network_cpus_map = *cpu_online_mask;
+
+       return 0;
+}
+
+module_init(tile_net_init_module);
index 4ffcd57b011b142fd367530ba032b3495a74a657..2857ab078aac1f9940f406f49916706dc3255441 100644 (file)
@@ -478,6 +478,7 @@ struct netvsc_device {
        u32 nvsp_version;
 
        atomic_t num_outstanding_sends;
+       wait_queue_head_t wait_drain;
        bool start_remove;
        bool destroy;
        /*
index 8b919471472fb1dba4d34ffcf0bfe5b4c723af7b..0c569831db5aeb0de77ef9a4d81d6d7e7f3281b8 100644 (file)
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        if (!net_device)
                return NULL;
 
+       init_waitqueue_head(&net_device->wait_drain);
        net_device->start_remove = false;
        net_device->destroy = false;
        net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
        /* Wait for all send completions */
-       while (atomic_read(&net_device->num_outstanding_sends)) {
-               dev_info(&device->device,
-                       "waiting for %d requests to complete...\n",
-                       atomic_read(&net_device->num_outstanding_sends));
-               udelay(100);
-       }
+       wait_event(net_device->wait_drain,
+                  atomic_read(&net_device->num_outstanding_sends) == 0);
 
        netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
                num_outstanding_sends =
                        atomic_dec_return(&net_device->num_outstanding_sends);
 
+               if (net_device->destroy && num_outstanding_sends == 0)
+                       wake_up(&net_device->wait_drain);
+
                if (netif_queue_stopped(ndev) && !net_device->start_remove &&
                        (hv_ringbuf_avail_percent(&device->channel->outbound)
                        > RING_AVAIL_PERCENT_HIWATER ||
index 5ac46f5226f3c5b4b1d35e3450ec922326902896..47f8e8939266fd64ce097e49362a8e13a7019ef1 100644 (file)
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
 #define IP1001_APS_ON                  11      /* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON                        2       /* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS       0x11    /* Conf Info IRQ & Status Reg */
+#define        IP101A_G_IRQ_PIN_USED           (1<<15) /* INTR pin used */
+#define        IP101A_G_IRQ_DEFAULT            IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
+       /* INTR pin used: speed/link/duplex will cause an interrupt */
+       c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+       if (c < 0)
+               return c;
+
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
                /* Additional delay (2ns) used to adjust RX clock phase
                 * at RGMII interface */
index 39ea0674dcde8cde6b31d71202b9b81873897ba6..5c120189ec86d419866b7320c36f4d618dc5df64 100644 (file)
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
        struct mdio_mux_parent_bus *pb = cb->parent;
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       /* In theory multiple mdio_mux could be stacked, thus creating
+        * more than a single level of nesting.  But in practice,
+        * SINGLE_DEPTH_NESTING will cover the vast majority of use
+        * cases.  We use it, instead of trying to handle the general
+        * case.
+        */
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
index 683ef1ce55196315a90f69f35f015b4773899830..5061608f408c67a41ab6a0432f10a206032af56a 100644 (file)
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
index 590f902deb6ba29f348c78dad429275cac23a276..9d6c80c8a0cf7693eacf92356c107be15fe78760 100644 (file)
@@ -161,7 +161,7 @@ static struct phy_driver ks8051_driver = {
 static struct phy_driver ks8001_driver = {
        .phy_id         = PHY_ID_KS8001,
        .name           = "Micrel KS8001 or KS8721",
-       .phy_id_mask    = 0x00fffff0,
+       .phy_id_mask    = 0x00ffffff,
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
@@ -174,7 +174,7 @@ static struct phy_driver ks8001_driver = {
 
 static struct phy_driver ksz9021_driver = {
        .phy_id         = PHY_ID_KSZ9021,
-       .phy_id_mask    = 0x000fff10,
+       .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@ MODULE_AUTHOR("David J. Choi");
 MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
-       { PHY_ID_KSZ9021, 0x000fff10 },
-       { PHY_ID_KS8001, 0x00fffff0 },
+       { PHY_ID_KSZ9021, 0x000ffffe },
+       { PHY_ID_KS8001, 0x00ffffff },
        { PHY_ID_KS8737, 0x00fffff0 },
        { PHY_ID_KS8041, 0x00fffff0 },
        { PHY_ID_KS8051, 0x00fffff0 },
index 71e2b0523bc2db243704abab76ce8bd32a47d6d9..3ae80eccd0efd9802e5e997d53cd54946ef6d788 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 
 #define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        return 0;
                }
 
-               if ((size > dev->net->mtu + ETH_HLEN) ||
+               if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
                    (size + offset > skb->len)) {
                        netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                   size);
index 964031e3da877f64c54cde0fa92f7cc6b7b06f69..a28a983d465e6d434a3272a36a9ab35a7b5b3eee 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPAD 0x129a
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S  0x12a0
 
@@ -100,6 +101,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { USB_DEVICE_AND_INTERFACE_INFO(
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
index add1064f755dd4e78fb27ed5a49604a6165a2e50..03c2d8d653df1afa52ee90744f0b33ec4d7baec5 100644 (file)
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        return skb->len > 0;
 }
 
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+       u8 *buf = urb->transfer_buffer;
+       bool link;
+
+       if (urb->actual_length < 16)
+               return;
+
+       link = !(buf[1] & 0x20);
+       if (netif_carrier_ok(dev->net) != link) {
+               if (link) {
+                       netif_carrier_on(dev->net);
+                       usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+               } else
+                       netif_carrier_off(dev->net);
+               netdev_dbg(dev->net, "Link Status is: %d\n", link);
+       }
+}
+
 static const struct driver_info moschip_info = {
        .description    = "MOSCHIP 7830/7832/7730 usb-NET adapter",
        .bind           = mcs7830_bind,
        .rx_fixup       = mcs7830_rx_fixup,
-       .flags          = FLAG_ETHER,
+       .flags          = FLAG_ETHER | FLAG_LINK_INTR,
+       .status         = mcs7830_status,
        .in             = 1,
        .out            = 2,
 };
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
        .description    = "Sitecom LN-30 usb-NET adapter",
        .bind           = mcs7830_bind,
        .rx_fixup       = mcs7830_rx_fixup,
-       .flags          = FLAG_ETHER,
+       .flags          = FLAG_ETHER | FLAG_LINK_INTR,
+       .status         = mcs7830_status,
        .in             = 1,
        .out            = 2,
 };
index 380dbea6109de022c97865775fb401e7bf199838..a051cedd64bde58179a1cb539b94a888fcd29416 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
@@ -257,29 +261,6 @@ err:
        return rv;
 }
 
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- *   a) hardcoding interface number, or
- *   b) use the fact that the wwan interface is the only one lacking additional
- *      (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
-       int rv = -EINVAL;
-
-       /* ignore any interface with additional descriptors */
-       if (intf->cur_altsetting->extralen)
-               goto err;
-
-       rv = qmi_wwan_bind_shared(dev, intf);
-err:
-       return rv;
-}
-
 static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
 {
        struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +328,15 @@ static const struct driver_info   qmi_wwan_shared = {
        .manage_power   = qmi_wwan_manage_power,
 };
 
-static const struct driver_info        qmi_wwan_gobi = {
-       .description    = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info        qmi_wwan_force_int0 = {
+       .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(0), /* interface whitelist bitmap */
 };
 
-/* ZTE suck at making USB descriptors */
 static const struct driver_info        qmi_wwan_force_int1 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
@@ -365,6 +346,24 @@ static const struct driver_info    qmi_wwan_force_int1 = {
        .data           = BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info        qmi_wwan_force_int3 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(3), /* interface whitelist bitmap */
+};
+
 static const struct driver_info        qmi_wwan_force_int4 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
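Editorial aside, not part of the patch: the force_intN variants differ only in the interface whitelist bitmap kept in .data, which the shared bind routine is expected to test against the probed interface number. A rough sketch of that idea (not the actual qmi_wwan code):

#include <stdbool.h>

/* Hypothetical helper: an empty bitmap means "no restriction". */
static bool intf_whitelisted(unsigned long whitelist, unsigned int intf_num)
{
	return whitelist == 0 || (whitelist & (1UL << intf_num));
}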
@@ -390,16 +389,23 @@ static const struct driver_info   qmi_wwan_force_int4 = {
 static const struct driver_info        qmi_wwan_sierra = {
        .description    = "Sierra Wireless wwan/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
        .data           = BIT(8) | BIT(19), /* interface whitelist bitmap */
 };
 
 #define HUAWEI_VENDOR_ID       0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+       USB_DEVICE(vend, prod), \
+       .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
 #define QMI_GOBI_DEVICE(vend, prod) \
        USB_DEVICE(vend, prod), \
-       .driver_info = (unsigned long)&qmi_wwan_gobi
+       .driver_info = (unsigned long)&qmi_wwan_force_int0
 
 static const struct usb_device_id products[] = {
        {       /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -501,6 +507,15 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_force_int4,
        },
+       {       /* ZTE MF60 */
+               .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor           = 0x19d2,
+               .idProduct          = 0x1402,
+               .bInterfaceClass    = 0xff,
+               .bInterfaceSubClass = 0xff,
+               .bInterfaceProtocol = 0xff,
+               .driver_info        = (unsigned long)&qmi_wwan_force_int2,
+       },
        {       /* Sierra Wireless MC77xx in QMI mode */
                .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
                .idVendor           = 0x1199,
@@ -510,20 +525,24 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_sierra,
        },
-       {QMI_GOBI_DEVICE(0x05c6, 0x9212)},      /* Acer Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)},      /* HP un2400 Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x371d)},      /* HP un2430 Mobile Broadband Module */
-       {QMI_GOBI_DEVICE(0x04da, 0x250d)},      /* Panasonic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x413c, 0x8172)},      /* Dell Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x1410, 0xa001)},      /* Novatel Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x0b05, 0x1776)},      /* Asus Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x19d2, 0xfff3)},      /* ONDA Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9001)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9002)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9202)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9203)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9222)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9009)},      /* Generic Gobi Modem device */
+
+       /* Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},    /* HP un2400 Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},    /* HP un2430 Mobile Broadband Module */
+       {QMI_GOBI1K_DEVICE(0x04da, 0x250d)},    /* Panasonic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x413c, 0x8172)},    /* Dell Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},    /* Asus Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},    /* ONDA Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},    /* Generic Gobi Modem device */
+
+       /* Gobi 2000 and 3000 devices */
        {QMI_GOBI_DEVICE(0x413c, 0x8186)},      /* Dell Gobi 2000 Modem device (N0218, VU936) */
        {QMI_GOBI_DEVICE(0x05c6, 0x920b)},      /* Generic Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x05c6, 0x9225)},      /* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -547,6 +566,8 @@ static const struct usb_device_id products[] = {
        {QMI_GOBI_DEVICE(0x16d8, 0x8002)},      /* CMDTech Gobi 2000 Modem device (VU922) */
        {QMI_GOBI_DEVICE(0x05c6, 0x9205)},      /* Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x9013)},      /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {QMI_GOBI_DEVICE(0x1199, 0x9015)},      /* Sierra Wireless Gobi 3000 Modem device */
+       {QMI_GOBI_DEVICE(0x1199, 0x9019)},      /* Sierra Wireless Gobi 3000 Modem device */
        { }                                     /* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
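
The .data field in each driver_info above is an interface whitelist bitmap: BIT(n) marks USB interface number n as the one carrying the QMI/wwan function, which is why the Gobi 1000 macro points at qmi_wwan_force_int3 while the Gobi 2000/3000 macro now uses qmi_wwan_force_int0. A minimal sketch of how a shared bind path could consult such a bitmap; the helper name is hypothetical and only approximates what qmi_wwan_bind_shared() does:

#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

/* Hypothetical helper: return true if this USB interface's number is
 * set in the whitelist bitmap stored in driver_info.data. */
static bool qmi_intf_whitelisted(struct usbnet *dev, struct usb_interface *intf)
{
	unsigned long whitelist = dev->driver_info->data;
	u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;

	return test_bit(ifnum, &whitelist);
}
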
index 3faef5670d1ff60547ec9fcbb42eb956fa7b58df..d75d1f56becff95ae9cf7b8f8dc08990d142d06c 100644 (file)
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 }
 
 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
        .rx_urb_size = 8 * 1024,
        .whitelist = {
                .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
        }
 };
 
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
        .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_68A3,
+       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 static const struct usb_device_id products[] = {
        {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-       .driver_info = (unsigned long) &sierra_net_info_68A3},
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
 
        {}, /* last item */
 };
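
All of the new entries share sierra_net_info_data_direct_ip, whose whitelist is built from sierra_net_ifnum_list above. A rough sketch of the kind of interface-number check that whitelist enables; the helper below is illustrative, not the driver's actual code:

/* Illustrative only: accept an interface whose bInterfaceNumber appears
 * in sierra_net_ifnum_list (7, 10 or 11 for these Direct IP modems). */
static bool sierra_ifnum_ok(u8 ifnum)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sierra_net_ifnum_list); i++)
		if (sierra_net_ifnum_list[i] == ifnum)
			return true;
	return false;
}
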
index 9f58330f1312059d37deeb072b2d5c32d3d6df03..aba769d77459b61e6cc42deff0a101acd709af89 100644 (file)
@@ -796,11 +796,13 @@ int usbnet_open (struct net_device *net)
        if (info->manage_power) {
                retval = info->manage_power(dev, 1);
                if (retval < 0)
-                       goto done;
+                       goto done_manage_power_error;
                usb_autopm_put_interface(dev->intf);
        }
        return retval;
 
+done_manage_power_error:
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
 done:
        usb_autopm_put_interface(dev->intf);
 done_nopm:
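
The new done_manage_power_error label exists because EVENT_DEV_OPEN has already been set by the time manage_power() runs; if that call fails, the flag must be cleared again or the device is left marked open. A generic sketch of the same unwind-in-reverse pattern, with illustrative names rather than usbnet's:

#include <linux/bitops.h>

#define EXAMPLE_DEV_OPEN	0	/* illustrative flag bit */

struct example_dev {
	unsigned long flags;
};

static int example_power_up(struct example_dev *dev)
{
	return 0;	/* stand-in for info->manage_power(dev, 1) */
}

static int example_open(struct example_dev *dev)
{
	int err;

	set_bit(EXAMPLE_DEV_OPEN, &dev->flags);

	err = example_power_up(dev);
	if (err < 0)
		goto err_power;

	return 0;

err_power:
	/* undo exactly what was set up before the step that failed */
	clear_bit(EXAMPLE_DEV_OPEN, &dev->flags);
	return err;
}
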
@@ -876,9 +878,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
 {
        struct usbnet *dev = netdev_priv(net);
 
-       strncpy (info->driver, dev->driver_name, sizeof info->driver);
-       strncpy (info->version, DRIVER_VERSION, sizeof info->version);
-       strncpy (info->fw_version, dev->driver_info->description,
+       strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+       strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+       strlcpy (info->fw_version, dev->driver_info->description,
                sizeof info->fw_version);
        usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
 }
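
The switch to strlcpy() matters because strncpy() leaves the destination unterminated whenever the source string is at least as long as the buffer, while strlcpy() always NUL-terminates (truncating if needed). A short illustration with a hypothetical 8-byte buffer:

	char buf[8];

	strncpy(buf, "0123456789", sizeof(buf));
	/* buf now holds "01234567" with no terminating NUL */

	strlcpy(buf, "0123456789", sizeof(buf));
	/* buf now holds "0123456" plus '\0' -- always terminated */
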
@@ -1202,6 +1204,21 @@ deferred:
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+       struct urb      *urb;
+       int             i;
+
+       /* don't refill the queue all at once */
+       for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+               urb = usb_alloc_urb(0, flags);
+               if (urb != NULL) {
+                       if (rx_submit(dev, urb, flags) == -ENOLINK)
+                               return;
+               }
+       }
+}
+
 /*-------------------------------------------------------------------------*/
 
 // tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@ static void usbnet_bh (unsigned long param)
                   !timer_pending (&dev->delay) &&
                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
-               int     qlen = RX_QLEN (dev);
-
-               if (temp < qlen) {
-                       struct urb      *urb;
-                       int             i;
-
-                       // don't refill the queue all at once
-                       for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
-                               urb = usb_alloc_urb (0, GFP_ATOMIC);
-                               if (urb != NULL) {
-                                       if (rx_submit (dev, urb, GFP_ATOMIC) ==
-                                           -ENOLINK)
-                                               return;
-                               }
-                       }
+
+               if (temp < RX_QLEN(dev)) {
+                       rx_alloc_submit(dev, GFP_ATOMIC);
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
                                          "rxqlen %d --> %d\n",
                                          temp, dev->rxq.qlen);
-                       if (dev->rxq.qlen < qlen)
+                       if (dev->rxq.qlen < RX_QLEN(dev))
                                tasklet_schedule (&dev->bh);
                }
                if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+                       dev->suspend_count--;
                        spin_unlock_irq(&dev->txq.lock);
                        return -EBUSY;
                } else {
@@ -1569,6 +1575,13 @@ int usbnet_resume (struct usb_interface *intf)
                spin_unlock_irq(&dev->txq.lock);
 
                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       /* handle remote wakeup ASAP */
+                       if (!dev->wait &&
+                               netif_device_present(dev->net) &&
+                               !timer_pending(&dev->delay) &&
+                               !test_bit(EVENT_RX_HALT, &dev->flags))
+                                       rx_alloc_submit(dev, GFP_KERNEL);
+
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
                        tasklet_schedule (&dev->bh);
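
Factoring the refill loop into rx_alloc_submit() lets two very different contexts share the same bounded logic: the bottom half must not sleep and passes GFP_ATOMIC, while the resume path runs in process context and can use GFP_KERNEL. Condensed from the hunks above, the two call sites now look like this:

	/* usbnet_bh(): softirq context, never sleeps */
	if (temp < RX_QLEN(dev))
		rx_alloc_submit(dev, GFP_ATOMIC);

	/* usbnet_resume(): process context, refill only when it is safe */
	if (!dev->wait &&
	    netif_device_present(dev->net) &&
	    !timer_pending(&dev->delay) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags))
		rx_alloc_submit(dev, GFP_KERNEL);
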
index 9ce6995e8d084d046beb75f80712cd1b16a4aaf3..f18149ae2588e682e661100f026f7bcf60a16d06 100644 (file)
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
-       struct u64_stats_sync syncp;
+       struct u64_stats_sync tx_syncp;
+       struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;
 
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 
        hdr = skb_vnet_hdr(skb);
 
-       u64_stats_update_begin(&stats->syncp);
+       u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_end(&stats->rx_syncp);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
        while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
 
-               u64_stats_update_begin(&stats->syncp);
+               u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_end(&stats->tx_syncp);
 
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                u64 tpackets, tbytes, rpackets, rbytes;
 
                do {
-                       start = u64_stats_fetch_begin(&stats->syncp);
+                       start = u64_stats_fetch_begin(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
+               } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+               do {
+                       start = u64_stats_fetch_begin(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
-               } while (u64_stats_fetch_retry(&stats->syncp, start));
+               } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
 
                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
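
u64_stats_sync assumes a single writer per sequence counter, but the rx counters are bumped from the receive path and the tx counters from the transmit path, which can run concurrently. Splitting syncp into rx_syncp and tx_syncp gives each direction its own writer, and readers fetch the two halves under independent counters, as the hunk above shows. The resulting per-direction read pattern, condensed:

	unsigned int start;
	u64 tpackets, rpackets;

	do {
		start = u64_stats_fetch_begin(&stats->tx_syncp);
		tpackets = stats->tx_packets;
	} while (u64_stats_fetch_retry(&stats->tx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&stats->rx_syncp);
		rpackets = stats->rx_packets;
	} while (u64_stats_fetch_retry(&stats->rx_syncp, start));
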
@@ -1231,11 +1236,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
        vi->config_enable = false;
        mutex_unlock(&vi->config_lock);
 
-       virtqueue_disable_cb(vi->rvq);
-       virtqueue_disable_cb(vi->svq);
-       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
-               virtqueue_disable_cb(vi->cvq);
-
        netif_device_detach(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
index 520a4b2eb9cc8b2d6c2a1e1281cf243b8ec8a9f4..a747c632597aab52e9e78f8f8c21a614f44b6fb4 100644 (file)
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
                }
        } else {
                dwrq->flags = 1; /* Should be define'd */
-               memcpy(extra + sizeof(struct sockaddr)*i,
-                      &qual,  sizeof(struct iw_quality)*i);
+               memcpy(extra + sizeof(struct sockaddr) * i, qual,
+                      sizeof(struct iw_quality) * i);
        }
        dwrq->length = i;
 
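
The fix replaces &qual with qual: qual is a pointer to an array of struct iw_quality, so &qual is the address of the pointer variable itself, and the old code copied i entries' worth of bytes starting at that stack slot instead of the data it points to. A minimal illustration of the distinction (function name and parameters are hypothetical):

static void copy_quality(void *dst, const struct iw_quality *qual, int n)
{
	/* correct: copies the n iw_quality entries qual points at */
	memcpy(dst, qual, n * sizeof(*qual));

	/* memcpy(dst, &qual, n * sizeof(*qual)) would instead copy the
	 * pointer variable itself plus whatever follows it in memory --
	 * the old, broken behaviour. */
}
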
index c54b7d37bff159f1fc8a7482a4d8bdfe806b9beb..420d69b2674ceae1282fcc9d8cbbc38d8c5d00af 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index 0ba81a66061fca201ae7917e21b1ffd6c703a91d..44ad6fe0278f410ce51019a24c13ee996a0f71bb 100644 (file)
@@ -1045,11 +1045,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
 
                                ath5k_txbuf_free_skb(ah, bf);
 
-                               spin_lock_bh(&ah->txbuflock);
+                               spin_lock(&ah->txbuflock);
                                list_move_tail(&bf->list, &ah->txbuf);
                                ah->txbuf_len++;
                                txq->txq_len--;
-                               spin_unlock_bh(&ah->txbuflock);
+                               spin_unlock(&ah->txbuflock);
                        }
                        txq->link = NULL;
                        txq->txq_poll_mark = false;
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
 * Initialization routines *
 \*************************/
 
+static const struct ieee80211_iface_limit if_limits[] = {
+       { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) },
+       { .max = 4,     .types =
+#ifdef CONFIG_MAC80211_MESH
+                                BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                                BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+       .limits = if_limits,
+       .n_limits = ARRAY_SIZE(if_limits),
+       .max_interfaces = 2048,
+       .num_different_channels = 1,
+};
+
 int __devinit
 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
 {
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);
 
+       hw->wiphy->iface_combinations = &if_comb;
+       hw->wiphy->n_iface_combinations = 1;
+
        /* SW support for IBSS_RSN is provided by mac80211 */
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
index ac53d901801deb037db4ca11137812d1df6d458f..dfb0441f406c24e59c10fac636007d7337cd4b65 100644 (file)
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
        return true;
 }
 
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
 {
        int internal_regulator =
                ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
index 2505ac44f0c16ff248be27b145b91cd1b90dde49..8396d150ce01d4303174aab4a1e9071d70ca09de 100644 (file)
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
 
 unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
                                           struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
 #endif
index f11d9b2677fd05753750e1311a351ac7b7a52cfe..1bd3a3d22101806aca2b06ebc9be5ee77afdddd7 100644 (file)
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
 #define INITVALS_9330_1P1_H
 
 static const u32 ar9331_1p1_baseband_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
        {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
        {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
        {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
        {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
-       {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+       {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
        {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
        {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
-       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-       {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+       {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
 };
 
 static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
        {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
        {0x000160b4, 0x92480040},
        {0x000160c0, 0x006db6db},
        {0x000160c4, 0x0186db60},
-       {0x000160c8, 0x6db6db6c},
+       {0x000160c8, 0x6db4db6c},
        {0x000160cc, 0x6de6c300},
        {0x000160d0, 0x14500820},
        {0x00016100, 0x04cb0001},
        {0x00016104, 0xfff80015},
        {0x00016108, 0x00080010},
        {0x0001610c, 0x00170000},
-       {0x00016140, 0x10804000},
+       {0x00016140, 0x10800000},
        {0x00016144, 0x01884080},
        {0x00016148, 0x000080c0},
        {0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
 };
 
 static const u32 ar9331_1p1_soc_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
 };
 
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
 };
 
 static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_1p1_mac_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
        {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
        {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
 
 static const u32 ar9331_common_rx_gain_1p1[][2] = {
        /* Addr      allmodes  */
-       {0x0000a000, 0x00010000},
-       {0x0000a004, 0x00030002},
-       {0x0000a008, 0x00050004},
-       {0x0000a00c, 0x00810080},
-       {0x0000a010, 0x00830082},
-       {0x0000a014, 0x01810180},
-       {0x0000a018, 0x01830182},
-       {0x0000a01c, 0x01850184},
-       {0x0000a020, 0x01890188},
-       {0x0000a024, 0x018b018a},
-       {0x0000a028, 0x018d018c},
-       {0x0000a02c, 0x01910190},
-       {0x0000a030, 0x01930192},
-       {0x0000a034, 0x01950194},
-       {0x0000a038, 0x038a0196},
-       {0x0000a03c, 0x038c038b},
-       {0x0000a040, 0x0390038d},
-       {0x0000a044, 0x03920391},
-       {0x0000a048, 0x03940393},
-       {0x0000a04c, 0x03960395},
+       {0x00009e18, 0x05000000},
+       {0x0000a000, 0x00060005},
+       {0x0000a004, 0x00810080},
+       {0x0000a008, 0x00830082},
+       {0x0000a00c, 0x00850084},
+       {0x0000a010, 0x01820181},
+       {0x0000a014, 0x01840183},
+       {0x0000a018, 0x01880185},
+       {0x0000a01c, 0x018a0189},
+       {0x0000a020, 0x02850284},
+       {0x0000a024, 0x02890288},
+       {0x0000a028, 0x028b028a},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
        {0x0000a050, 0x00000000},
        {0x0000a054, 0x00000000},
        {0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a074, 0x00000000},
        {0x0000a078, 0x00000000},
        {0x0000a07c, 0x00000000},
-       {0x0000a080, 0x22222229},
-       {0x0000a084, 0x1d1d1d1d},
-       {0x0000a088, 0x1d1d1d1d},
-       {0x0000a08c, 0x1d1d1d1d},
-       {0x0000a090, 0x171d1d1d},
-       {0x0000a094, 0x11111717},
-       {0x0000a098, 0x00030311},
-       {0x0000a09c, 0x00000000},
-       {0x0000a0a0, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x24242428},
+       {0x0000a098, 0x171e1e1e},
+       {0x0000a09c, 0x02020b0b},
+       {0x0000a0a0, 0x02020202},
        {0x0000a0a4, 0x00000000},
        {0x0000a0a8, 0x00000000},
        {0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a0b4, 0x00000000},
        {0x0000a0b8, 0x00000000},
        {0x0000a0bc, 0x00000000},
-       {0x0000a0c0, 0x001f0000},
-       {0x0000a0c4, 0x01000101},
-       {0x0000a0c8, 0x011e011f},
-       {0x0000a0cc, 0x011c011d},
-       {0x0000a0d0, 0x02030204},
-       {0x0000a0d4, 0x02010202},
-       {0x0000a0d8, 0x021f0200},
-       {0x0000a0dc, 0x0302021e},
-       {0x0000a0e0, 0x03000301},
-       {0x0000a0e4, 0x031e031f},
-       {0x0000a0e8, 0x0402031d},
-       {0x0000a0ec, 0x04000401},
-       {0x0000a0f0, 0x041e041f},
-       {0x0000a0f4, 0x0502041d},
-       {0x0000a0f8, 0x05000501},
-       {0x0000a0fc, 0x051e051f},
-       {0x0000a100, 0x06010602},
-       {0x0000a104, 0x061f0600},
-       {0x0000a108, 0x061d061e},
-       {0x0000a10c, 0x07020703},
-       {0x0000a110, 0x07000701},
+       {0x0000a0c0, 0x22072208},
+       {0x0000a0c4, 0x22052206},
+       {0x0000a0c8, 0x22032204},
+       {0x0000a0cc, 0x22012202},
+       {0x0000a0d0, 0x221f2200},
+       {0x0000a0d4, 0x221d221e},
+       {0x0000a0d8, 0x33023303},
+       {0x0000a0dc, 0x33003301},
+       {0x0000a0e0, 0x331e331f},
+       {0x0000a0e4, 0x4402331d},
+       {0x0000a0e8, 0x44004401},
+       {0x0000a0ec, 0x441e441f},
+       {0x0000a0f0, 0x55025503},
+       {0x0000a0f4, 0x55005501},
+       {0x0000a0f8, 0x551e551f},
+       {0x0000a0fc, 0x6602551d},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
        {0x0000a114, 0x00000000},
        {0x0000a118, 0x00000000},
        {0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a138, 0x00000000},
        {0x0000a13c, 0x00000000},
        {0x0000a140, 0x001f0000},
-       {0x0000a144, 0x01000101},
-       {0x0000a148, 0x011e011f},
-       {0x0000a14c, 0x011c011d},
-       {0x0000a150, 0x02030204},
-       {0x0000a154, 0x02010202},
-       {0x0000a158, 0x021f0200},
-       {0x0000a15c, 0x0302021e},
-       {0x0000a160, 0x03000301},
-       {0x0000a164, 0x031e031f},
-       {0x0000a168, 0x0402031d},
-       {0x0000a16c, 0x04000401},
-       {0x0000a170, 0x041e041f},
-       {0x0000a174, 0x0502041d},
-       {0x0000a178, 0x05000501},
-       {0x0000a17c, 0x051e051f},
-       {0x0000a180, 0x06010602},
-       {0x0000a184, 0x061f0600},
-       {0x0000a188, 0x061d061e},
-       {0x0000a18c, 0x07020703},
-       {0x0000a190, 0x07000701},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
        {0x0000a194, 0x00000000},
        {0x0000a198, 0x00000000},
        {0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a1f0, 0x00000396},
        {0x0000a1f4, 0x00000396},
        {0x0000a1f8, 0x00000396},
-       {0x0000a1fc, 0x00000196},
+       {0x0000a1fc, 0x00000296},
 };
 
 static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
-       {0},
-       {3},
-       {0},
-       {0},
+       {0x00000000},
+       {0x00000003},
+       {0x00000000},
+       {0x00000000},
 };
 
 static const u32 ar9331_1p1_chansel_xtal_25M[] = {
index a277cf6f339d4364413548f95e2f7fa5823ad45f..4866550ddd965eb0370133bc21ad55b22e29b240 100644 (file)
@@ -214,6 +214,7 @@ struct ath_frame_info {
        enum ath9k_key_type keytype;
        u8 keyix;
        u8 retries;
+       u8 rtscts_rate;
 };
 
 struct ath_buf_state {
index 2b8f61c210e1c2dd74ef5a779a5282f96253b856..abbd6effd60d31eb6a871e101ac10d0bbfa16f2e 100644 (file)
@@ -1496,6 +1496,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                        priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
 
                if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+                       ath9k_htc_choose_set_bssid(priv);
                        if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
                                ath9k_htc_start_ani(priv);
                        else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if (changed & BSS_CHANGED_IBSS) {
                if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
                        common->curaid = bss_conf->aid;
                        memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
                        ath9k_htc_set_bssid(priv);
-               } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
-                       ath9k_htc_choose_set_bssid(priv);
                }
        }
 
index abe05ec85d501dbeaea089a19dd444fd079b0a4f..995ca8e1302efc7562ff9905d8e91d00ffb25f74 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
@@ -784,13 +784,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
+       struct ath_common *common = ath9k_hw_common(ah);
+       int i = 0;
+
        REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
        udelay(100);
        REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
-       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
                udelay(100);
 
+               if (WARN_ON_ONCE(i >= 100)) {
+                       ath_err(common, "PLL4 measurement not done\n");
+                       break;
+               }
+
+               i++;
+       }
+
        return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
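
The added counter turns an unbounded busy-wait on PLL4_MEAS_DONE into a bounded one: after roughly 100 x 100us the loop warns once and gives up instead of hanging the caller. The same idea as a small generic helper, illustrative rather than the driver's code:

/* Illustrative bounded poll: wait for 'mask' to set in 'reg', giving up
 * after max_tries iterations instead of spinning forever. */
static bool example_poll_bit(struct ath_hw *ah, u32 reg, u32 mask, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (REG_READ(ah, reg) & mask)
			return true;
		udelay(100);
	}
	WARN_ON_ONCE(1);
	return false;
}
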
@@ -1468,6 +1480,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
                return false;
 
        ah->chip_fullsleep = false;
+
+       if (AR_SREV_9330(ah))
+               ar9003_hw_internal_regulator_apply(ah);
        ath9k_hw_init_pll(ah, chan);
        ath9k_hw_set_rfmode(ah, chan);
 
index dfa78e8b6470c02074025f5a94ff8be34058ad8e..dac1a2709e3cb7aa961240a088ef15fe3311bec2 100644 (file)
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       bool ret;
+       bool ret = true;
 
        ieee80211_stop_queues(sc->hw);
 
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
        ath9k_debug_samp_bb_mac(sc);
        ath9k_hw_disable_interrupts(ah);
 
-       ret = ath_drain_all_txq(sc, retry_tx);
-
        if (!ath_stoprecv(sc))
                ret = false;
 
+       if (!ath_drain_all_txq(sc, retry_tx))
+               ret = false;
+
        if (!flush) {
                if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                        ath_rx_tasklet(sc, 1, true);
@@ -970,6 +971,15 @@ void ath_hw_pll_work(struct work_struct *work)
                                            hw_pll_work.work);
        u32 pll_sqsum;
 
+       /*
+        * Ensure that the PLL WAR is executed only
+        * after the STA has associated, or after
+        * beaconing has started on interfaces
+        * that use beacons.
+        */
+       if (!(sc->sc_flags & SC_OP_BEACONS))
+               return;
+
        if (AR_SREV_9485(sc->sc_ah)) {
 
                ath9k_ps_wakeup(sc);
@@ -1442,15 +1452,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           ((vif->type == NL80211_IFTYPE_ADHOC) &&
-            sc->nvifs > 0)) {
-               ath_err(common, "Cannot create ADHOC interface when other"
-                       " interfaces already exist.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 
        sc->nvifs++;
@@ -1475,15 +1476,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
 
-       /* See if new interface type is valid. */
-       if ((new_type == NL80211_IFTYPE_ADHOC) &&
-           (sc->nvifs > 1)) {
-               ath_err(common, "When using ADHOC, it must be the only"
-                       " interface.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        if (ath9k_uses_beacons(new_type) &&
            !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
index e1fcc68124dc3bc78359cf72e2da459ddf9c4560..0735aeb3b26cefcd15c2847a1775718d36cecbc6 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
index 23eaa1b26ebe5ca9a1a242ea4de5e9fa6508b02c..4d571394c7a82523404db2d9ae3ef0ba0af760c6 100644 (file)
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
-                                          struct sk_buff *skb);
+                                          struct sk_buff *skb,
+                                          bool dequeue);
 
 enum {
        MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
-                       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+                       bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
 
                if (!bf)
                        continue;
@@ -937,6 +938,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
+       struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;
 
@@ -947,18 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
 
        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
-       /*
-        * We check if Short Preamble is needed for the CTS rate by
-        * checking the BSS's global flag.
-        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
-        */
-       rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
-       info->rtscts_rate = rate->hw_value;
-
-       if (tx_info->control.vif &&
-           tx_info->control.vif->bss_conf.use_short_preamble)
-               info->rtscts_rate |= rate->hw_value_short;
+       info->rtscts_rate = fi->rtscts_rate;
 
        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
@@ -1000,13 +991,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                }
 
                /* legacy rates */
+               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;
 
-               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1726,7 +1717,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                return;
        }
 
-       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
        if (!bf)
                return;
 
@@ -1753,7 +1744,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 
        bf = fi->bf;
        if (!bf)
-               bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+               bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
 
        if (!bf)
                return;
@@ -1775,10 +1766,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       const struct ieee80211_rate *rate;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
+       bool short_preamble = false;
 
+       /*
+        * We check if Short Preamble is needed for the CTS rate by
+        * checking the BSS's global flag.
+        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+        */
+       if (tx_info->control.vif &&
+           tx_info->control.vif->bss_conf.use_short_preamble)
+               short_preamble = true;
+
+       rate = ieee80211_get_rts_cts_rate(hw, tx_info);
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 
        if (sta)
@@ -1793,6 +1796,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
+       fi->rtscts_rate = rate->hw_value;
+       if (short_preamble)
+               fi->rtscts_rate |= rate->hw_value_short;
 }
 
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
@@ -1814,7 +1820,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb,
+                                          bool dequeue)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1870,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
        return bf;
 
 error:
+       if (dequeue)
+               __skb_unlink(skb, &tid->buf_q);
        dev_kfree_skb_any(skb);
        return NULL;
 }
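
The new dequeue argument exists because ath_tx_form_aggr() feeds ath_tx_setup_buffer() frames taken from tid->buf_q (which is why the error path unlinks from that queue): if buffer setup fails there, the skb has to be removed from the list before it is freed, or the queue would keep a pointer to freed memory. The other callers pass false and keep their previous behaviour. The resulting error path, condensed from the hunk above:

error:
	if (dequeue)
		__skb_unlink(skb, &tid->buf_q);	/* drop the stale list entry */
	dev_kfree_skb_any(skb);
	return NULL;
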
@@ -1893,7 +1902,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
                 */
                ath_tx_send_ampdu(sc, tid, skb, txctl);
        } else {
-               bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+               bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
                if (!bf)
                        return;
 
index 0e81904956cf1c04fa37ad01e65282b55e2bf8f9..5c54aa43ca2d81b3936d917aeb68f7331aabb34b 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
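
The ccmp_keymap bitmap added to struct ath_common records which hardware key slots actually hold CCMP keys, so the receive path (see the ath9k_rx_accept hunk earlier) only honours a KEYMISS status when the reported key index refers to an installed CCMP key. Condensed from the three hunks, the bitmap's lifecycle is:

	/* install (ath_key_config) */
	set_bit(idx, common->keymap);
	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
		set_bit(idx, common->ccmp_keymap);

	/* receive validation (ath9k_rx_accept) */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	/* removal (ath_key_delete) */
	clear_bit(key->hw_key_idx, common->ccmp_keymap);
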
index 67c13af6f206e04475996642b00698ac9e9d12f5..c06b6cb5c91ea6c64c14a4b38faf7c5a2dd74e1b 100644 (file)
@@ -877,6 +877,10 @@ struct b43_wl {
         * from the mac80211 subsystem. */
        u16 mac80211_initially_registered_queues;
 
+       /* Set this if we call ieee80211_register_hw() and check if we call
+        * ieee80211_unregister_hw(). */
+       bool hw_registred;
+
        /* We can only have one operating interface (802.11 core)
         * at a time. General information about this interface follows.
         */
index 5a39b226b2e3193958bf29472c3112c7361de0e5..1b988f26bdf1114b8fb24af9626d13599c329d08 100644 (file)
@@ -2437,6 +2437,7 @@ start_ieee80211:
        err = ieee80211_register_hw(wl->hw);
        if (err)
                goto err_one_core_detach;
+       wl->hw_registred = true;
        b43_leds_register(wl->current_dev);
        goto out;
 
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
        if (prev_status >= B43_STAT_STARTED) {
                err = b43_wireless_core_start(up_dev);
                if (err) {
-                       b43err(wl, "Fatal: Coult not start device for "
+                       b43err(wl, "Fatal: Could not start device for "
                               "selected %s-GHz band\n",
                               band_to_string(chan->band));
                        b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 
        hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
        wl->mac80211_initially_registered_queues = hw->queues;
+       wl->hw_registred = false;
        hw->max_rates = 2;
        SET_IEEE80211_DEV(hw, dev->dev);
        if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
         * as the ieee80211 unreg will destroy the workqueue. */
        cancel_work_sync(&wldev->restart_work);
 
-       /* Restore the queues count before unregistering, because firmware detect
-        * might have modified it. Restoring is important, so the networking
-        * stack can properly free resources. */
-       wl->hw->queues = wl->mac80211_initially_registered_queues;
-       b43_leds_stop(wldev);
-       ieee80211_unregister_hw(wl->hw);
+       B43_WARN_ON(!wl);
+       if (wl->current_dev == wldev && wl->hw_registred) {
+               /* Restore the queues count before unregistering, because firmware detect
+                * might have modified it. Restoring is important, so the networking
+                * stack can properly free resources. */
+               wl->hw->queues = wl->mac80211_initially_registered_queues;
+               b43_leds_stop(wldev);
+               ieee80211_unregister_hw(wl->hw);
+       }
 
        b43_one_core_detach(wldev->dev);
 
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        cancel_work_sync(&wldev->restart_work);
 
        B43_WARN_ON(!wl);
-       if (wl->current_dev == wldev) {
+       if (wl->current_dev == wldev && wl->hw_registred) {
                /* Restore the queues count before unregistering, because firmware detect
                 * might have modified it. Restoring is important, so the networking
                 * stack can properly free resources. */
index f1f8bd09bd87fd378cb405d6aa56a38eafe4fb3c..c8baf020c20f7a4fd73104bd56a87cae375b935e 100644 (file)
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-               bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
index cd9c9bc186d93099f56b8b912e905255ee7411c9..eae691e2f7dd4abcd1d08c5bf8bcf3e6872e97fd 100644 (file)
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
        if (prev_status >= B43legacy_STAT_STARTED) {
                err = b43legacy_wireless_core_start(up_dev);
                if (err) {
-                       b43legacyerr(wl, "Fatal: Coult not start device for "
+                       b43legacyerr(wl, "Fatal: Could not start device for "
                               "newly selected %s-PHY mode\n",
                               phymode_to_string(new_mode));
                        b43legacy_wireless_core_exit(up_dev);
index e2480d19627679c89cb166f9a4954db74dd21ff8..8e7e6928c93699bf9b7df35f9efc4c9b26928481 100644 (file)
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
        data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
-       /* redirect, configure ane enable io for interrupt signal */
+       /* redirect, configure and enable io for interrupt signal */
        data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
-       if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+       if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
                data |= SDIO_SEPINT_ACT_HI;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
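
The one-character fix matters because OR-ing irq_flags with the nonzero constant IRQF_TRIGGER_HIGH always yields a nonzero value, so SDIO_SEPINT_ACT_HI was being set unconditionally; a bitwise AND actually tests whether the trigger-high flag is present. A short illustration:

	unsigned long irq_flags = 0;			/* IRQF_TRIGGER_HIGH not set   */
	bool wrong = irq_flags | IRQF_TRIGGER_HIGH;	/* always true                 */
	bool right = irq_flags & IRQF_TRIGGER_HIGH;	/* false, as it should be here */
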
index c5a34ffe64599e9d5852aeec83f93ab00a215901..a299d42da8e74a358939b8fa5da8a32a01fd312b 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
+#include <linux/vmalloc.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
                return -EINVAL;
        }
 
-       devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+       devinfo->image = vmalloc(fw->size); /* plus nvram */
        if (!devinfo->image)
                return -ENOMEM;
 
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
 void brcmf_usb_exit(void)
 {
        usb_deregister(&brcmf_usbdrvr);
-       kfree(g_image.data);
+       vfree(g_image.data);
        g_image.data = NULL;
        g_image.len = 0;
 }
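
Firmware images can be large, and kmalloc(..., GFP_ATOMIC) needs physically contiguous pages without being allowed to sleep, so it fails easily for big buffers; vmalloc() only needs virtually contiguous memory and may sleep, which a probe-time firmware load can normally afford. Allocation and free must stay paired, hence the matching vfree() above. A minimal sketch of the pattern (error handling trimmed, names illustrative):

#include <linux/vmalloc.h>

	u8 *image = vmalloc(fw->size);		/* large, virtually contiguous */

	if (!image)
		return -ENOMEM;
	memcpy(image, fw->data, fw->size);

	/* ... hand the image to the device ... */

	vfree(image);				/* pairs with vmalloc(), not kfree() */
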
index 9cfae0c08707d95e21a68bdd8822bc4f966e223f..95aa8e1683ecb4bdeb663e0cf605d699407b96f0 100644 (file)
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
        netif_stop_queue(priv->net_dev);
 }
 
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
-       struct ipw2100_priv *priv = libipw_priv(dev);
-
-       return ipw2100_up(priv, 1);
-}
-
 static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
        .ndo_stop               = ipw2100_close,
        .ndo_start_xmit         = libipw_xmit,
        .ndo_change_mtu         = libipw_change_mtu,
-       .ndo_init               = ipw2100_net_init,
        .ndo_tx_timeout         = ipw2100_tx_timeout,
        .ndo_set_mac_address    = ipw2100_set_address,
        .ndo_validate_addr      = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        printk(KERN_INFO DRV_NAME
               ": Detected Intel PRO/Wireless 2100 Network Connection\n");
 
+       err = ipw2100_up(priv, 1);
+       if (err)
+               goto fail;
+
        err = ipw2100_wdev_init(dev);
        if (err)
                goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
         * network device we would call ipw2100_up.  This introduced a race
         * condition with newer hotplug configurations (network was coming
         * up and making calls before the device was initialized).
-        *
-        * If we called ipw2100_up before we registered the device, then the
-        * device name wasn't registered.  So, we instead use the net_dev->init
-        * member to call a function that then just turns and calls ipw2100_up.
-        * net_dev->init is called after name allocation but before the
-        * notifier chain is called */
+        */
        err = register_netdev(dev);
        if (err) {
                printk(KERN_WARNING DRV_NAME
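
The reordering closes the window in which the stack could call into a device that was registered by name but not yet brought up: the hardware is now fully initialised by ipw2100_up() before register_netdev() makes it visible, so the ndo_init trampoline is no longer needed. The new probe ordering, condensed from the hunks above:

	err = ipw2100_up(priv, 1);	/* bring the hardware up first     */
	if (err)
		goto fail;

	err = ipw2100_wdev_init(dev);
	if (err)
		goto fail;

	err = register_netdev(dev);	/* only now visible to the stack   */
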
index 509301a5e7e264ae079201a246ded0252582308a..ff5d689e13f34884222b58fe06291140db1f9682 100644 (file)
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
                return 0;
        }
 
-       if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+       if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
                IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
                        key_flags);
                spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
        memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
        il->stations[sta_id].sta.key.key_flags =
            STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-       il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
        il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
index cbf2dc18341f58eec6141158e0c6d3aaad02d12b..5d4807c2b56d80aaafe47089a2498b29856ba40c 100644 (file)
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
                return;
 
        /* monitor and check for other stuck queues */
-       if (il_is_any_associated(il)) {
-               for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-                       /* skip as we already checked the command queue */
-                       if (cnt == il->cmd_queue)
-                               continue;
-                       if (il_check_stuck_queue(il, cnt))
-                               return;
-               }
+       for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+               /* skip as we already checked the command queue */
+               if (cnt == il->cmd_queue)
+                       continue;
+               if (il_check_stuck_queue(il, cnt))
+                       return;
        }
 
        mod_timer(&il->watchdog,
index db6c6e528022635638f8aa8594c2a6872da52f9b..2463c06264387230759f14801239610a4fe4eb58 100644 (file)
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
          even if the microcode doesn't advertise it.
 
          Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
-       bool "support uCode 16.0"
-       depends on IWLWIFI
-       help
-         This option enables support for uCode version 16.0.
-
-         Say Y if you want to use 16.0 microcode.
index 406f297a9a56dd27eecde87f8d027a879d337137..d615eacbf050be320d803c74fec0ace223abae73 100644 (file)
@@ -18,7 +18,6 @@ iwlwifi-objs          += iwl-notif-wait.o
 iwlwifi-objs           += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
 
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
index 7f793417c78740b5fd908aa976ea208d5c2c6c3f..8133105ac6450ae19ae2743019aae2a3182b1b46 100644 (file)
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
        .hd_v2 = true,
 };
 
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
        .hd_v2 = true,
 };
 
index 381b02cf339c46e0353b07bf04f201b70f49a4bf..e5e8ada4aaf6ad93cd109ee2112921ed136b3c00 100644 (file)
 #define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
 #define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 #define IWL6050_UCODE_API_OK 5
 #define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
 
 /* EEPROM versions */
 #define EEPROM_6000_TX_POWER_VERSION   (4)
@@ -86,7 +89,7 @@ static const struct iwl_base_params iwl6000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@ static const struct iwl_base_params iwl6050_base_params = {
        .chain_noise_scale = 1500,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 1024,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +121,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_ht_params iwl6000_ht_params = {
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
        IWL_DEVICE_6030,
 };
 
+#define IWL_DEVICE_6035                                                \
+       .fw_name_pre = IWL6030_FW_PRE,                          \
+       .ucode_api_max = IWL6035_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL6035_UCODE_API_OK,                   \
+       .ucode_api_min = IWL6035_UCODE_API_MIN,                 \
+       .device_family = IWL_DEVICE_FAMILY_6030,                \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
+       .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
+       .base_params = &iwl6000_g2_base_params,                 \
+       .bt_params = &iwl6000_bt_params,                        \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true
+
 const struct iwl_cfg iwl6035_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
-       IWL_DEVICE_6030,
+       IWL_DEVICE_6035,
        .ht_params = &iwl6000_ht_params,
 };
 
index 51e1a69ffdda629ff84c7008513202d85ce2e94d..8cebd7c363fc301477cd71e5ed8b96a184d58c16 100644 (file)
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
            (priv->bt_full_concurrent != full_concurrent)) {
                priv->bt_full_concurrent = full_concurrent;
+               priv->last_bt_traffic_load = priv->bt_traffic_load;
 
                /* Update uCode's rate table. */
                tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
index b31584e87bc7f4d03f2d3d2ba727879979b19156..eb6a8eaf42fc54b54ddfc57926f6f29c60371bdc 100644 (file)
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                                                ~IWL_STA_DRIVER_ACTIVE;
                                priv->stations[i].used &=
                                                ~IWL_STA_UCODE_INPROGRESS;
-                               spin_unlock_bh(&priv->sta_lock);
+                               continue;
                        }
                        /*
                         * Rate scaling has already been initialized, send
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
                key_flags |= STA_KEY_MULTICAST_MSK;
 
        sta_cmd.key.key_flags = key_flags;
-       sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+       sta_cmd.key.key_offset = keyconf->hw_key_idx;
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
 
index e7c157e5ebebdb2c181d1473a02de769e570e3e0..7f97dec8534db1f60ba843bfa317b5cf303a6285 100644 (file)
@@ -2239,6 +2239,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
        return count;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 
        return count;
 }
+#endif
 
 static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
                                         char __user *user_buf,
@@ -2345,7 +2347,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
 DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
 DEBUGFS_READ_FILE_OPS(reply_tx_error);
 DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
 DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
 
 /*
@@ -2405,7 +2409,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
        DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
 
        if (iwl_advanced_bt_coexist(priv))
                DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
index 3c72bad0ae56fc3d1e443f2b08a70e337910e4f0..fac67a526a30880199bb12b8fb7310e6a3471a75 100644 (file)
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        return -EINVAL;
 }
 
-static int alloc_pci_desc(struct iwl_drv *drv,
-                         struct iwl_firmware_pieces *pieces,
-                         enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+                          struct iwl_firmware_pieces *pieces,
+                          enum iwl_ucode_type type)
 {
        int i;
        for (i = 0;
             i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
             i++)
                if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-                                               get_sec(pieces, type, i)))
-                       return -1;
+                                     get_sec(pieces, type, i)))
+                       return -ENOMEM;
        return 0;
 }
 
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
         * 1) unmodified from disk
         * 2) backup cache for save/restore during power-downs */
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
-               if (alloc_pci_desc(drv, &pieces, i))
-                       goto err_pci_alloc;
+               if (iwl_alloc_ucode(drv, &pieces, i))
+                       goto out_free_fw;
 
        /* Now that we can no longer fail, copy information */
 
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 
        /* We have our copies now, allow OS release its copies */
        release_firmware(ucode_raw);
-       complete(&drv->request_firmware_complete);
 
        drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
        if (!drv->op_mode)
                goto out_unbind;
 
+       /*
+        * Complete the firmware request last so that
+        * a driver unbind (stop) doesn't run while we
+        * are doing the start() above.
+        */
+       complete(&drv->request_firmware_complete);
        return;
 
  try_again:
@@ -877,7 +882,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
                goto out_unbind;
        return;
 
- err_pci_alloc:
+ out_free_fw:
        IWL_ERR(drv, "failed to allocate pci memory\n");
        iwl_dealloc_ucode(drv);
        release_firmware(ucode_raw);
index 50c58911e7188c3a7a2f29a6d4bf55f7a9e05953..b8e2b223ac36b6c1634e78256253d6095bcce08a 100644 (file)
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
                struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
                int element, s8 *max_txpower_in_half_dbm)
 {
        s8 max_txpower_avg = 0; /* (dBm) */
 
        /* Take the highest tx power from any valid chains */
-       if ((cfg->valid_tx_ant & ANT_A) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_A) &&
            (enhanced_txpower[element].chain_a_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_a_max;
-       if ((cfg->valid_tx_ant & ANT_B) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_B) &&
            (enhanced_txpower[element].chain_b_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_b_max;
-       if ((cfg->valid_tx_ant & ANT_C) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_C) &&
            (enhanced_txpower[element].chain_c_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_c_max;
-       if (((cfg->valid_tx_ant == ANT_AB) |
-           (cfg->valid_tx_ant == ANT_BC) |
-           (cfg->valid_tx_ant == ANT_AC)) &&
+       if (((priv->hw_params.valid_tx_ant == ANT_AB) |
+           (priv->hw_params.valid_tx_ant == ANT_BC) |
+           (priv->hw_params.valid_tx_ant == ANT_AC)) &&
            (enhanced_txpower[element].mimo2_max > max_txpower_avg))
                max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-       if ((cfg->valid_tx_ant == ANT_ABC) &&
+       if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
            (enhanced_txpower[element].mimo3_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
                                 ((txp->delta_20_in_40 & 0xf0) >> 4),
                                 (txp->delta_20_in_40 & 0x0f));
 
-               max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
+               max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
                                                      &max_txp_avg_halfdbm);
 
                /*
index ab2f4d7500a40df03d68293986d8c6ae46fc9969..013680332f075be85a9035bcec33eaec0c2e9285 100644 (file)
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
                            WIPHY_FLAG_IBSS_RSN;
 
+#ifdef CONFIG_PM_SLEEP
        if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            priv->trans->ops->wowlan_suspend &&
            device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                hw->wiphy->wowlan.pattern_max_len =
                                        IWLAGN_WOWLAN_MAX_PATTERN_LEN;
        }
+#endif
 
        if (iwlwifi_mod_params.power_save)
                hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               iwl_leds_exit(priv);
                return ret;
        }
        priv->mac80211_registered = 1;
@@ -793,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644 (file)
index f166955..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
-       struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
-                                           GFP_KERNEL);
-
-       if (!phy_db)
-               return phy_db;
-
-       phy_db->dev = dev;
-
-       /* TODO: add default values of the phy db. */
-       return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
-                      enum iwl_phy_db_section_type type,
-                      u16 chg_id)
-{
-       if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
-               return NULL;
-
-       switch (type) {
-       case IWL_PHY_DB_CFG:
-               return &phy_db->cfg;
-       case IWL_PHY_DB_CALIB_NCH:
-               return &phy_db->calib_nch;
-       case IWL_PHY_DB_CALIB_CH:
-               return &phy_db->calib_ch;
-       case IWL_PHY_DB_CALIB_CHG_PAPD:
-               if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
-                       return NULL;
-               return &phy_db->calib_ch_group_papd[chg_id];
-       case IWL_PHY_DB_CALIB_CHG_TXP:
-               if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
-                       return NULL;
-               return &phy_db->calib_ch_group_txp[chg_id];
-       default:
-               return NULL;
-       }
-       return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
-                                   enum iwl_phy_db_section_type type,
-                                   u16 chg_id)
-{
-       struct iwl_phy_db_entry *entry =
-                               iwl_phy_db_get_section(phy_db, type, chg_id);
-       if (!entry)
-               return;
-
-       kfree(entry->data);
-       entry->data = NULL;
-       entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
-       int i;
-
-       if (!phy_db)
-               return;
-
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
-       for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
-               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
-       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
-               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
-       kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-                          enum iwl_phy_db_section_type type, u8 *data,
-                          u16 size, gfp_t alloc_ctx)
-{
-       struct iwl_phy_db_entry *entry;
-       u16 chg_id = 0;
-
-       if (!phy_db)
-               return -EINVAL;
-
-       if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
-           type == IWL_PHY_DB_CALIB_CHG_TXP)
-               chg_id = le16_to_cpup((__le16 *)data);
-
-       entry = iwl_phy_db_get_section(phy_db, type, chg_id);
-       if (!entry)
-               return -EINVAL;
-
-       kfree(entry->data);
-       entry->data = kmemdup(data, size, alloc_ctx);
-       if (!entry->data) {
-               entry->size = 0;
-               return -ENOMEM;
-       }
-
-       entry->size = size;
-
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               phy_db->channel_num = le32_to_cpup((__le32 *)data);
-               phy_db->channel_size =
-                     (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
-       }
-
-       return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
-       if (ch_id <= 14 ||
-           (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
-           (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
-           (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
-               return 1;
-       return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
-       if (WARN_ON(!is_valid_channel(ch_id)))
-               return 0xff;
-
-       if (ch_id <= 14)
-               return ch_id - 1;
-       if (ch_id <= 64)
-               return (ch_id + 20) / 4;
-       if (ch_id <= 140)
-               return (ch_id - 12) / 4;
-       return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
-       if (WARN_ON(!is_valid_channel(ch_id)))
-               return 0xff;
-
-       if (1 <= ch_id && ch_id <= 14)
-               return 0;
-       if (36 <= ch_id && ch_id <= 64)
-               return 1;
-       if (100 <= ch_id && ch_id <= 140)
-               return 2;
-       return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
-       struct iwl_phy_db_chg_txp *txp_chg;
-       int i;
-       u8 ch_index = ch_id_to_ch_index(ch_id);
-       if (ch_index == 0xff)
-               return 0xff;
-
-       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
-               txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
-               if (!txp_chg)
-                       return 0xff;
-               /*
-                * Looking for the first channel group that its max channel is
-                * higher then wanted channel.
-                */
-               if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
-                       return i;
-       }
-       return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-                               enum iwl_phy_db_section_type type, u8 **data,
-                               u16 *size, u16 ch_id)
-{
-       struct iwl_phy_db_entry *entry;
-       u32 channel_num;
-       u32 channel_size;
-       u16 ch_group_id = 0;
-       u16 index;
-
-       if (!phy_db)
-               return -EINVAL;
-
-       /* find wanted channel group */
-       if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
-               ch_group_id = channel_id_to_papd(ch_id);
-       else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
-               ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
-       entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
-       if (!entry)
-               return -EINVAL;
-
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               index = ch_id_to_ch_index(ch_id);
-               channel_num = phy_db->channel_num;
-               channel_size = phy_db->channel_size;
-               if (index >= channel_num) {
-                       IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
-                       return -EINVAL;
-               }
-               *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
-               *size = channel_size;
-       } else {
-               *data = entry->data;
-               *size = entry->size;
-       }
-       return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644 (file)
index c34c6a9..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS  8
-
-struct iwl_phy_db_entry {
-       u16     size;
-       u8      *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power chanel group.
- */
-struct iwl_phy_db {
-       struct iwl_phy_db_entry cfg;
-       struct iwl_phy_db_entry calib_nch;
-       struct iwl_phy_db_entry calib_ch;
-       struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
-       struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
-       u32 channel_num;
-       u32 channel_size;
-
-       /* for an access to the logger */
-       struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
-       IWL_PHY_DB_CFG = 1,
-       IWL_PHY_DB_CALIB_NCH,
-       IWL_PHY_DB_CALIB_CH,
-       IWL_PHY_DB_CALIB_CHG_PAPD,
-       IWL_PHY_DB_CALIB_CHG_TXP,
-       IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
-       __le32 space;
-       __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-                          enum iwl_phy_db_section_type type, u8 *data,
-                          u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-                               enum iwl_phy_db_section_type type, u8 **data,
-                               u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
index 3b1069290fa9a9cf646a2f04325d6c1862b70a7a..dfd54662e3e675bc2c560894cd47d002cbf5ff76 100644 (file)
 #define SCD_TXFACT             (SCD_BASE + 0x10)
 #define SCD_ACTIVE             (SCD_BASE + 0x14)
 #define SCD_QUEUECHAIN_SEL     (SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN                (SCD_BASE + 0x244)
 #define SCD_AGGR_SEL           (SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK     (SCD_BASE + 0x108)
 
index 6213c05a4b529c6ba0f4263b83d9050afcfdd614..e959207c630a9352f143536c1a51a5de995ee96b 100644 (file)
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
                                 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-       int index, enum dma_data_direction dma_dir);
+                        enum dma_data_direction dma_dir);
 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
                         struct sk_buff_head *skbs);
 int iwl_queue_space(const struct iwl_queue *q);
index 21a8a672fbb258caae8735c61fcfbc0f09e9f426..a8750238ee09b78a99623693332e8c8c40c83a85 100644 (file)
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
                                iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+       tfd->num_tbs = 0;
 }
 
 /**
  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  * @trans - transport private data
  * @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-       int index, enum dma_data_direction dma_dir)
+                        enum dma_data_direction dma_dir)
 {
        struct iwl_tfd *tfd_tmp = txq->tfds;
 
+       /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+       int rd_ptr = txq->q.read_ptr;
+       int idx = get_cmd_index(&txq->q, rd_ptr);
+
        lockdep_assert_held(&txq->lock);
 
-       iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
-                        &tfd_tmp[index], dma_dir);
+       /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+       iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+                        &tfd_tmp[rd_ptr], dma_dir);
 
        /* free SKB */
        if (txq->entries) {
                struct sk_buff *skb;
 
-               skb = txq->entries[index].skb;
+               skb = txq->entries[idx].skb;
 
                /* Can be called from irqs-disabled context
                 * If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                 */
                if (skb) {
                        iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[index].skb = NULL;
+                       txq->entries[idx].skb = NULL;
                }
        }
 }
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 
                iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
-               iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+               iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
                freed++;
        }
 
index 2e57161854b901187f40a50628db2fdf6b4c8ac0..79c6b91417f9430c2982b4e51aefd8f3f7dc47b3 100644 (file)
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
-               /* The read_ptr needs to bound by q->n_window */
-               iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
-                                   dma_dir);
+               iwlagn_txq_free_tfd(trans, txq, dma_dir);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
        spin_unlock_bh(&txq->lock);
@@ -1060,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
        iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
                       trans_pcie->scd_bc_tbls.dma >> 10);
 
+       /* The chain extension of the SCD doesn't work well. This feature is
+        * enabled by default by the HW, so we need to disable it manually.
+        */
+       iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
                iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
index fb787df0166699f9c31e7adf8ff25c924c08f7a8..a0b7cfd3468532e705eeedaf4cf93cdb1e5be536 100644 (file)
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
                        hdr = (struct ieee80211_hdr *) skb->data;
                        mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
                }
+               txi->flags |= IEEE80211_TX_STAT_ACK;
        }
        ieee80211_tx_status_irqsafe(data2->hw, skb);
        return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
                       "unregister family %i\n", ret);
 }
 
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+       { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+       { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+                                BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+       .limits = hwsim_if_limits,
+       .n_limits = ARRAY_SIZE(hwsim_if_limits),
+       .max_interfaces = 2048,
+       .num_different_channels = 1,
+};
+
 static int __init init_mac80211_hwsim(void)
 {
        int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
                hw->wiphy->n_addresses = 2;
                hw->wiphy->addresses = data->addresses;
 
+               hw->wiphy->iface_combinations = &hwsim_if_comb;
+               hw->wiphy->n_iface_combinations = 1;
+
                if (fake_hw_scan) {
                        hw->wiphy->max_scan_ssids = 255;
                        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
index 9c44088054dd90bd967626d8416e63c083225fc8..900ee129e825987dfb2dbe6b485bfa0bb40dbc74 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffebabc60a6ab1059276664bc3f3f71e129bb..6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
index 87671446e24b00203e0398142894e8fec547d308..5c7fd185373cfb37c0ee57e62b9b54135af505b7 100644 (file)
@@ -948,6 +948,20 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
                bss_cfg->ssid.ssid_len = params->ssid_len;
        }
 
+       switch (params->hidden_ssid) {
+       case NL80211_HIDDEN_SSID_NOT_IN_USE:
+               bss_cfg->bcast_ssid_ctl = 1;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_LEN:
+               bss_cfg->bcast_ssid_ctl = 0;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+               /* firmware doesn't support this type of hidden SSID */
+       default:
+               kfree(bss_cfg);
+               return -EINVAL;
+       }
+
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
                kfree(bss_cfg);
                wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
@@ -1471,7 +1485,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        struct wireless_dev *wdev;
 
        if (!adapter)
-               return NULL;
+               return ERR_PTR(-EFAULT);
 
        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -1481,12 +1495,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                if (priv->bss_mode) {
                        wiphy_err(wiphy,
                                  "cannot create multiple sta/adhoc ifaces\n");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                wdev->wiphy = wiphy;
                priv->wdev = wdev;
@@ -1509,12 +1523,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                if (priv->bss_mode) {
                        wiphy_err(wiphy, "Can't create multiple AP interfaces");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                priv->wdev = wdev;
                wdev->wiphy = wiphy;
@@ -1531,14 +1545,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
                              ether_setup, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
-               goto error;
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-ENOMEM);
        }
 
        mwifiex_init_priv_params(priv, dev);
@@ -1569,7 +1584,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        /* Register network device */
        if (register_netdevice(dev)) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
-               goto error;
+               free_netdev(dev);
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-EFAULT);
        }
 
        sema_init(&priv->async_sem, 1);
@@ -1581,12 +1598,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        mwifiex_dev_debugfs_init(priv);
 #endif
        return dev;
-error:
-       if (dev && (dev->reg_state == NETREG_UNREGISTERED))
-               free_netdev(dev);
-       priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
-       return NULL;
 }
 EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
 
index 9f674bbebe65afc2f3fa2edf001af0d414cd8b4d..561452a5c818f4a3d6ea653d029cd32fdab80587 100644 (file)
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
        u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_bcast_ssid {
+       struct host_cmd_tlv tlv;
+       u8 bcast_ctl;
+} __packed;
+
 struct host_cmd_tlv_beacon_period {
        struct host_cmd_tlv tlv;
        __le16 period;
index ceb82cd749cc7ba1ca2054ae0b675802d2194190..383820a52beba0d62428614131adbe42def1e5db 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e0377473282f05fab01835578cb9b2579a40217f..fc8a9bfa1248305fababa37b59ca90110a57afaa 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3dcd237c5aada48223e224a7f943ef2692..11e731f3581c2dbf4856d8676ca6ee66c12ccabe 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index e2faec4db10863c28c7156e922b14b2ca4fa02c9..cecb27283196150afdcecc56c451fa396456891f 100644 (file)
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                goto done;
 
        for (i = 0; i < adapter->priv_num; i++) {
-
                tpriv = adapter->priv[i];
 
-               if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
-                   (tpriv->media_connected)) {
-                       if (netif_queue_stopped(tpriv->netdev))
-                               mwifiex_wake_up_net_dev_queue(tpriv->netdev,
-                                                             adapter);
-               }
+               if (tpriv->media_connected &&
+                   netif_queue_stopped(tpriv->netdev))
+                       mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
        }
 done:
        dev_kfree_skb_any(skb);
index 76dfbc42a732fc92530fc68a7b8573ea0f54d437..89f9a2a45de3f772e15cbd4084735e9bc7013d54 100644 (file)
@@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
                              struct cfg80211_ap_settings *params) {
        int i;
 
+       if (!params->privacy) {
+               bss_config->protocol = PROTOCOL_NO_SECURITY;
+               bss_config->key_mgmt = KEY_MGMT_NONE;
+               bss_config->wpa_cfg.length = 0;
+               priv->sec_info.wep_enabled = 0;
+               priv->sec_info.wpa_enabled = 0;
+               priv->sec_info.wpa2_enabled = 0;
+
+               return 0;
+       }
+
        switch (params->auth_type) {
        case NL80211_AUTHTYPE_OPEN_SYSTEM:
                bss_config->auth_mode = WLAN_AUTH_OPEN;
@@ -132,6 +143,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        struct host_cmd_tlv_dtim_period *dtim_period;
        struct host_cmd_tlv_beacon_period *beacon_period;
        struct host_cmd_tlv_ssid *ssid;
+       struct host_cmd_tlv_bcast_ssid *bcast_ssid;
        struct host_cmd_tlv_channel_band *chan_band;
        struct host_cmd_tlv_frag_threshold *frag_threshold;
        struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +165,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                cmd_size += sizeof(struct host_cmd_tlv) +
                            bss_cfg->ssid.ssid_len;
                tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+               bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+               bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+               bcast_ssid->tlv.len =
+                               cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+               bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+               cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+               tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
        }
        if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +436,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
        if (!bss_cfg)
                return -ENOMEM;
 
+       mwifiex_set_sys_config_invalid_data(bss_cfg);
        bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
        bss_cfg->channel = channel;
 
index 49ebf20c56ebc0ac98dc8d5b25bc3a1164847020..22a5916564b84099576e1c554b37d1fcbcd420b2 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
index f3fc6551585780b08724bb8112457caafeef220e..3fa4d417699381225e853a56238e0d8506a2f99b 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index 2e9e6af21362c1fe302d68e61cb4a83d229f5b24..dfcd02ab6cae5add70a2eb47c703d54b32c324ce 100644 (file)
@@ -2110,7 +2110,7 @@ resize_buf:
        while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
                if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
                    matched) {
-                       if (!ether_addr_equal(bssid->mac, match_bssid))
+                       if (ether_addr_equal(bssid->mac, match_bssid))
                                *matched = true;
                }
 
index ca36cccaba31d87b1920f4238aad0be364fce060..8f754025b06ead9b9a2c9d5be62d5d19b871690f 100644 (file)
@@ -396,8 +396,7 @@ struct rt2x00_intf {
         * for hardware which doesn't support hardware
         * sequence counting.
         */
-       spinlock_t seqlock;
-       u16 seqno;
+       atomic_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
index b49773ef72f2d0c32e3e4e5f55f2ebac3bd540bc..dd24b2663b5e528e04a0814726ec0f06ceae6cf3 100644 (file)
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
        else
                rt2x00dev->intf_sta_count++;
 
-       spin_lock_init(&intf->seqlock);
        mutex_init(&intf->beacon_skb_mutex);
        intf->beacon = entry;
 
index 4c662eccf53c60e32e352ac96259268103b539f7..2fd830103415dca94a8d582ec1de91c93b074cb0 100644 (file)
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+       u16 seqno;
 
        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
-       spin_lock(&intf->seqlock);
-
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-               intf->seqno += 0x10;
-       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-       hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
-       spin_unlock(&intf->seqlock);
+               seqno = atomic_add_return(0x10, &intf->seqno);
+       else
+               seqno = atomic_read(&intf->seqno);
 
+       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+       hdr->seq_ctrl |= cpu_to_le16(seqno);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
index d357d1ed92f6ddc363f0babca95652d8f446b3c2..74ecc33fdd90c249d5fcd3c46d68d646113e0985 100644 (file)
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
        case QID_RX:
                if (!rt2x00queue_full(queue))
                        rt2x00queue_for_each_entry(queue,
-                                                  Q_INDEX_DONE,
                                                   Q_INDEX,
+                                                  Q_INDEX_DONE,
                                                   NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
index 2e0de2f5f0f92d22a1e384dab601c72912216f9a..c2d5b495c179a1021dd4cd4221c0032f3a99e34a 100644 (file)
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
                        radio_on = true;
                } else if (radio_on) {
                        radio_on = false;
-                       cancel_delayed_work_sync(&priv->led_on);
+                       cancel_delayed_work(&priv->led_on);
                        ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
                }
        } else if (radio_on) {
index d228358e6a403b9c8eee56d63ce4991058975dc9..9970c2b1b19979dc3577472c7c21e27703a38a76 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index ad87a1ac6462aab40befe12db72eef99fcc52f5b..db6430c1a08414650a894d3e8a6d41c57cba1711 100644 (file)
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((u64)tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
index 9f15ccaf8f05aec733882e98a3ff443fdb5737d0..5ec50a476a69c084ccba378813db16fca7087db1 100644 (file)
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
-           wl->station_mode != STATION_ACTIVE_MODE) {
+       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
                /* indicate to the stack, that beacons have been lost */
index 1b851f650e074eb795ffb708dd25b3c94a2cb9e1..e2750a12c6f160a922609f775fcf65c4c81d662e 100644 (file)
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        }
 
        if (wl->irq) {
+               irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
                ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
                if (ret < 0) {
                        wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
                }
 
                irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-               disable_irq(wl->irq);
 
                wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
                wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
index 6248c354fc5c659fd840a2e7cff6186536207596..567660cd2fcd3f3ead3a5c82baa38f6eed6d3690 100644 (file)
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_reset_wake(struct wl1251 *wl)
@@ -281,6 +285,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
 
        wl->use_eeprom = pdata->use_eeprom;
 
+       irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
        ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
        if (ret < 0) {
                wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +294,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
 
        irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
-       disable_irq(wl->irq);
-
        ret = wl1251_init_ieee80211(wl);
        if (ret)
                goto out_irq;
index 54156b0b5c2d7defe6b42791d022d4c982fd945f..d7b907e67170348e70302e6a1cfe70642a8c6f7e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index 509aa881d790fa4ae7be894c7dc8fa362e161114..f3d6fa5082696c145b30b453fd027ce4cdd88a11 100644 (file)
@@ -1715,6 +1715,7 @@ out:
 
 }
 
+#ifdef CONFIG_PM
 /* Set the global behaviour of RX filters - On/Off + default action */
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
                                        enum rx_filter_action action)
@@ -1794,3 +1795,4 @@ out:
        kfree(acx);
        return ret;
 }
+#endif /* CONFIG_PM */
index 8106b2ebfe607dd921a87565a16f97f4339de355..e6a74869a5ff539df5589e90e73bb12d98f34747 100644 (file)
@@ -1330,9 +1330,11 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
 int wl12xx_acx_config_hangover(struct wl1271 *wl);
+
+#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
                                        enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
                             struct wl12xx_rx_filter *filter);
-
+#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
index 1f1d9488dfb6b26a2482d88e66b957e4406b474b..d6a3c6b07827738bbc3e0f3ea0ea977e1a6e9dad 100644 (file)
@@ -279,6 +279,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
        wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
+#ifdef CONFIG_PM
 int wl1271_rx_filter_enable(struct wl1271 *wl,
                            int index, bool enable,
                            struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@ void wl1271_rx_filter_clear_all(struct wl1271 *wl)
                wl1271_rx_filter_enable(wl, i, 0, NULL);
        }
 }
+#endif /* CONFIG_PM */
index 2596401308a86e24210efd65d531d91d6be60746..f4a6fcaeffb1db381ef9fd007a72578d984a3b4c 100644 (file)
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
        unsigned int count;
        int i, copy_off;
 
-       count = DIV_ROUND_UP(
-                       offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+       count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
 
        copy_off = skb_headlen(skb) % PAGE_SIZE;
 
index 2027afe405fed60de8d0bf2dbe85a06456edcf28..30899901aef56b00e05c5f062c86cabcb54127d8 100644 (file)
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
-       unregister_netdev(info->netdev);
-
        xennet_disconnect_backend(info);
 
-       del_timer_sync(&info->rx_refill_timer);
-
        xennet_sysfs_delif(info->netdev);
 
+       unregister_netdev(info->netdev);
+
+       del_timer_sync(&info->rx_refill_timer);
+
        free_percpu(info->stats);
 
        free_netdev(info->netdev);
index 46f4a9f9f5e476ce90729e4386c74442abcfa05f..281f18c2fb8282670c4dd6dab4593ab4ef3cc4b8 100644 (file)
@@ -232,7 +232,7 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
 
 static int check_crc(u8 *buf, int buflen)
 {
-       u8 len;
+       int len;
        u16 crc;
 
        len = buf[0] + 1;
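
The check_crc() hunk widens the frame-length variable from u8 to int: once buf[0] is 255, buf[0] + 1 no longer fits in eight bits and an 8-bit len silently wraps to 0. A stand-alone demonstration of the wrap-around (illustrative values only, not the NFC frame layout):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf0 = 255;		/* hypothetical length byte */

	uint8_t narrow = buf0 + 1;	/* wraps around to 0 */
	int     wide   = buf0 + 1;	/* keeps the value 256 */

	printf("u8  len = %u\n", narrow);
	printf("int len = %d\n", wide);
	return 0;
}
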
index 343ad29e211c66768491e325046ff0b58bcb15ec..e44f8c2d239d253afc045164834f0476b94cf932 100644 (file)
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
-               if (of_address_to_resource(np, 0, &res))
-                       continue;
-               if (res.start != lookup->phys_addr)
-                       continue;
+               if (!of_address_to_resource(np, 0, &res))
+                       if (res.start != lookup->phys_addr)
+                               continue;
                pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
                return lookup;
        }
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
        of_node_put(root);
        return rc;
 }
+EXPORT_SYMBOL_GPL(of_platform_populate);
 #endif /* CONFIG_OF_ADDRESS */
index da14432806c6e1662e536c2c18c21763094a5329..efc4b7f308cfd6e1c37a44a0407c6e070f9d514e 100644 (file)
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;
 
 /*
index bf0cee629b60f8bb763b4407f631e7ba7e778aa8..099f46cd8e87a4ca823dc24f707458857f26eaa5 100644 (file)
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        pci_pm_set_unknown_state(pci_dev);
 
+       /*
+        * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+        * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+        * hasn't been quiesced and tries to turn it off.  If the controller
+        * is already in D3, this can hang or cause memory corruption.
+        *
+        * Since the value of the COMMAND register doesn't matter once the
+        * device has been suspended, we can safely set it to 0 here.
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index c3b331b74fa0a5702300745b0d6423a19d88d30a..0cc053af70bd361e806a94993f64bb4077e69b88 100644 (file)
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
        list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
                for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
                        _i_ < _maps_node_->num_maps; \
-                       i++, _map_ = &_maps_node_->maps[_i_])
+                       _i_++, _map_ = &_maps_node_->maps[_i_])
 
 /**
  * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
index f6e7c670906cb16644f61ec83e99bba88f30e666..90c837f469a612be02304415a85c384d7b9363af 100644 (file)
 #include "core.h"
 #include "pinctrl-imx.h"
 
-#define IMX_PMX_DUMP(info, p, m, c, n)         \
-{                                              \
-       int i, j;                               \
-       printk("Format: Pin Mux Config\n");     \
-       for (i = 0; i < n; i++) {               \
-               j = p[i];                       \
-               printk("%s %d 0x%lx\n",         \
-                       info->pins[j].name,     \
-                       m[i], c[i]);            \
-       }                                       \
+#define IMX_PMX_DUMP(info, p, m, c, n)                 \
+{                                                      \
+       int i, j;                                       \
+       printk(KERN_DEBUG "Format: Pin Mux Config\n");  \
+       for (i = 0; i < n; i++) {                       \
+               j = p[i];                               \
+               printk(KERN_DEBUG "%s %d 0x%lx\n",      \
+                       info->pins[j].name,             \
+                       m[i], c[i]);                    \
+       }                                               \
 }
 
 /* The bits in CONFIG cell defined in binding doc*/
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
 
        /* create mux map */
        parent = of_get_parent(np);
-       if (!parent)
+       if (!parent) {
+               kfree(new_map);
                return -EINVAL;
+       }
        new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
        new_map[0].data.mux.function = parent->name;
        new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
        }
 
        dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
-               new_map->data.mux.function, new_map->data.mux.group, map_num);
+               (*map)->data.mux.function, (*map)->data.mux.group, map_num);
 
        return 0;
 }
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
 static void imx_dt_free_map(struct pinctrl_dev *pctldev,
                                struct pinctrl_map *map, unsigned num_maps)
 {
-       int i;
-
-       for (i = 0; i < num_maps; i++)
-               kfree(map);
+       kfree(map);
 }
 
 static struct pinctrl_ops imx_pctrl_ops = {
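
The imx_dt_node_to_map()/imx_dt_free_map() hunks above rest on the same ownership rule: new_map is a single contiguous allocation, so it has to be freed on the early-error path, and it has to be freed exactly once rather than once per element as the old loop did. A userspace sketch of that rule with made-up types (struct demo_map):

#include <stdlib.h>

struct demo_map {
	int type;
};

static struct demo_map *demo_alloc_maps(size_t n)
{
	return calloc(n, sizeof(struct demo_map));	/* one allocation */
}

static void demo_free_maps(struct demo_map *maps)
{
	free(maps);	/* one matching free, regardless of n */
}

int main(void)
{
	struct demo_map *maps = demo_alloc_maps(4);

	if (!maps)
		return 1;
	/* ... fill in maps[0..3] ... */
	demo_free_maps(maps);
	return 0;
}
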
@@ -478,6 +477,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
 #ifdef DEBUG
        IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
 #endif
+
        return 0;
 }
 
index 7737d4d71a3cc7fa8541a603615a29b96b9f173f..e9bf71fbedcafc9a14f14d662d6ec6ca1d7436bc 100644 (file)
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+       IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+       IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
 };
 
 /* Pad names for the pinmux subsystem */
index 556e45a213eb1997f0ce351e884f4bf958dbe3cf..4ba4636b6a4ac6b67d30b366299b2452bb983b76 100644 (file)
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
 
                /* Compose group name */
                group = kzalloc(length, GFP_KERNEL);
-               if (!group)
-                       return -ENOMEM;
+               if (!group) {
+                       ret = -ENOMEM;
+                       goto free;
+               }
                snprintf(group, length, "%s.%d", np->name, reg);
                new_map[i].data.mux.group = group;
                i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
                pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
                if (!pconfig) {
                        ret = -ENOMEM;
-                       goto free;
+                       goto free_group;
                }
 
                new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
 
        return 0;
 
+free_group:
+       if (!purecfg)
+               kfree(group);
 free:
        kfree(new_map);
        return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
        return 0;
 
 err:
+       platform_set_drvdata(pdev, NULL);
        iounmap(d->base);
        return ret;
 }
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
 {
        struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
 
+       platform_set_drvdata(pdev, NULL);
        pinctrl_unregister(d->pctl);
        iounmap(d->base);
 
index b8e01c3eaa95fdd9186ced5a85a4fb1e45d65c19..3e7e47d6b38526178b0985bf0e7162f8fab16d70 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
         * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
         */
        if (nmk_chip->sleepmode && on) {
-               __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+               __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
                                    NMK_GPIO_SLPM_WAKEUP_ENABLE);
        }
 
@@ -1245,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
                ret = PTR_ERR(clk);
                goto out_unmap;
        }
+       clk_prepare(clk);
 
        nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
        if (!nmk_chip) {
@@ -1436,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
 
        dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
 
-       /* Handle this special glitch on altfunction C */
+       /*
+        * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+        * we may pass through an undesired state. In this case we take
+        * some extra care.
+        *
+        * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+        *  - Save SLPM registers (since we have a shadow register in the
+        *    nmk_chip we're using that as backup)
+        *  - Set SLPM=0 for the IOs you want to switch and others to 1
+        *  - Configure the GPIO registers for the IOs that are being switched
+        *  - Set IOFORCE=1
+        *  - Modify the AFLSA/B registers for the IOs that are being switched
+        *  - Set IOFORCE=0
+        *  - Restore SLPM registers
+        *  - Any spurious wake up event during switch sequence to be ignored
+        *    and cleared
+        *
+        * We REALLY need to save ALL slpm registers, because the external
+        * IOFORCE will switch *all* ports to their sleepmode setting so as
+        * to avoid glitches. (Not just one port!)
+        */
        glitch = (g->altsetting == NMK_GPIO_ALT_C);
 
        if (glitch) {
@@ -1688,18 +1710,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
+static const struct of_device_id nmk_pinctrl_match[] = {
+       {
+               .compatible = "stericsson,nmk_pinctrl",
+               .data = (void *)PINCTRL_NMK_DB8500,
+       },
+       {},
+};
+
 static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
 {
        const struct platform_device_id *platid = platform_get_device_id(pdev);
+       struct device_node *np = pdev->dev.of_node;
        struct nmk_pinctrl *npct;
+       unsigned int version = 0;
        int i;
 
        npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
        if (!npct)
                return -ENOMEM;
 
+       if (platid)
+               version = platid->driver_data;
+       else if (np)
+               version = (unsigned int)
+                       of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
+
        /* Poke in other ASIC variants here */
-       if (platid->driver_data == PINCTRL_NMK_DB8500)
+       if (version == PINCTRL_NMK_DB8500)
                nmk_pinctrl_db8500_init(&npct->soc);
 
        /*
@@ -1758,6 +1796,7 @@ static struct platform_driver nmk_pinctrl_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "pinctrl-nomadik",
+               .of_match_table = nmk_pinctrl_match,
        },
        .probe = nmk_pinctrl_probe,
        .id_table = nmk_pinctrl_id,
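
The Nomadik pinctrl hunks above add device-tree probing alongside the existing platform_device_id path: when the device comes from the DT there is no id entry, so the ASIC variant is taken from the matching of_device_id's .data instead. A generic sketch of that shape with hypothetical names (demo_match, demo_probe, DEMO_VARIANT_*); unlike the driver it also NULL-checks the match result:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum { DEMO_VARIANT_A = 1, DEMO_VARIANT_B = 2 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-a", .data = (void *)(unsigned long)DEMO_VARIANT_A },
	{ .compatible = "vendor,demo-b", .data = (void *)(unsigned long)DEMO_VARIANT_B },
	{ /* sentinel */ },
};

static int demo_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	unsigned long variant;

	if (id) {
		/* Probed through the platform id table. */
		variant = id->driver_data;
	} else {
		/* Probed through the device tree. */
		const struct of_device_id *of_id =
			of_match_device(demo_match, &pdev->dev);

		if (!of_id)
			return -ENODEV;
		variant = (unsigned long)of_id->data;
	}

	/* ... configure the hardware according to 'variant' ... */
	return variant ? 0 : -EINVAL;
}
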
index ba15b1a29e524103563a6035ebd4250e01417d0f..e9f8e7d110018c14d6011ccf8903e086af983ce3 100644 (file)
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
        return ret;
 }
 
-static const struct of_device_id pinmux_ids[]  = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
        { .compatible = "sirf,prima2-gpio-pinmux" },
        {}
 };
index 5ae50aadf88548ef9267414427bc76c14b0c7b68..b3f6b2873fdd4cb0ebba5e249913846c2ce51985 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired from:
  * - U300 Pinctl drivers
index 9155783bb47fbd332648fecff577b67a0519e1f1..d950eb78d939679af768c754f45adea5ea2a7f23 100644 (file)
@@ -2,7 +2,7 @@
  * Driver header file for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fff168be7f0062ae4bd0049460aaef831a37f48a..d6cca8c81b92cf9cd9cb63f9d972965e46670249 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
 }
 module_exit(spear1310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
index a8ab2a6f51bf313eb51421f15870b545a6e0cd7b..a0eb057e55bd3f91e390364a9187ee1f4e0768ab 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1340 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
 }
 module_exit(spear1340_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
index 9c82a35e4e788212be656e98101b954f661e2dce..4dfc2849b1728d33a913958fbe0bda5fc67c0001 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr300 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
 }
 module_exit(spear300_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
index 1a97076051254f87095dbda35a1f49dadbb76e84..96883693fb7eeb9d2b5b403ce0a17b10a2bcc94e 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
 }
 module_exit(spear310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
index de726e6c283a611afe19095d2f1ae7c7af7191f8..020b1e0bdb3ee77c0e4fd254e91467c3436604e9 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr320 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
 }
 module_exit(spear320_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
index 91c883bc46a6b1e0ab40f6446a736936210a3c8b..0242378f7cb86c677a055d9c2c6bcc93797efad7 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 5d5fdd8df7b8e8e99ad7387e91cb420ece60cbcd..31f44347f17ccfd32e3ca0f0913d85b2998aaa05 100644 (file)
@@ -2,7 +2,7 @@
  * Header file for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c1a3fd8e12438bb0d4a995f107981edfe7c68a89..ce875dc365e5f2ed0fa61dda0162fd1bb5be3cd1 100644 (file)
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
                },
        },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer Extensa 5235",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+               },
+       },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer TravelMate 5760",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+               },
+       },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer Aspire 5750",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+               },
+       },
        {}
 };
 
index 639db4d0aa768ef70f3be5293fc5209c8ba60cb8..2fd9d36acd159f20cd2885ea70a633cee6415f77 100644 (file)
@@ -5,7 +5,7 @@
  *
  * (C) 2009 - Peter Feuerer     peter (a) piie.net
  *                              http://piie.net
- *     2009 Borislav Petkov <petkovbb@gmail.com>
+ *     2009 Borislav Petkov    bp (a) alien8.de
  *
  * Inspired by and many thanks to:
  *  o acerfand   - Rachel Greenham
index 8a582bdfdc76e3d20a9bef63e1458035999776b3..694a15a56230668c4eee12c7f5e7bc421a09cb0a 100644 (file)
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
        struct apple_gmux_data *gmux_data = bl_get_data(bd);
        u32 brightness = bd->props.brightness;
 
+       if (bd->props.state & BL_CORE_SUSPENDED)
+               return 0;
+
        /*
         * Older gmux versions require writing out lower bytes first then
         * setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
 }
 
 static const struct backlight_ops gmux_bl_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = gmux_get_brightness,
        .update_status = gmux_update_status,
 };
index e6c08ee8d46c0acb5d1652b2627bc65b5f2d4922..5f78aac9b163bf21c957f0beccbdff80d095228a 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/err.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
-#include <linux/rfkill.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
 #include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
 
 static struct platform_device *platform_device;
 static struct backlight_device *dell_backlight_device;
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
 
-static const struct dmi_system_id __initdata dell_device_table[] = {
+static const struct dmi_system_id dell_device_table[] __initconst = {
        {
                .ident = "Dell laptop",
                .matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
 };
 MODULE_DEVICE_TABLE(dmi, dell_device_table);
 
-static struct dmi_system_id __devinitdata dell_blacklist[] = {
-       /* Supported by compal-laptop */
-       {
-               .ident = "Dell Mini 9",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
-               },
-       },
+static struct dmi_system_id __devinitdata dell_quirks[] = {
        {
-               .ident = "Dell Mini 10",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro V130",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 10v",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro V131",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 1012",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro 3350",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Inspiron 11z",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro 3555",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 12",
+               .callback = dmi_matched,
+               .ident = "Dell Inspiron N311z",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
-       {}
-};
-
-static struct dmi_system_id __devinitdata dell_quirks[] = {
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro V130",
+               .ident = "Dell Inspiron M5110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro V131",
+               .ident = "Dell Vostro 3360",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro 3555",
+               .ident = "Dell Vostro 3460",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Inspiron N311z",
+               .ident = "Dell Vostro 3560",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Inspiron M5110",
+               .ident = "Dell Vostro 3450",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
        return buffer;
 }
 
-/* Derived from information in DellWirelessCtl.cpp:
-   Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
-   Input byte 0 = 0: Wireless information
-
-   result[0]: return code
-   result[1]:
-     Bit 0:      Hardware switch supported
-     Bit 1:      Wifi locator supported
-     Bit 2:      Wifi is supported
-     Bit 3:      Bluetooth is supported
-     Bit 4:      WWAN is supported
-     Bit 5:      Wireless keyboard supported
-     Bits 6-7:   Reserved
-     Bit 8:      Wifi is installed
-     Bit 9:      Bluetooth is installed
-     Bit 10:     WWAN is installed
-     Bits 11-15: Reserved
-     Bit 16:     Hardware switch is on
-     Bit 17:     Wifi is blocked
-     Bit 18:     Bluetooth is blocked
-     Bit 19:     WWAN is blocked
-     Bits 20-31: Reserved
-   result[2]: NVRAM size in bytes
-   result[3]: NVRAM format version number
-
-   Input byte 0 = 2: Wireless switch configuration
-   result[0]: return code
-   result[1]:
-     Bit 0:      Wifi controlled by switch
-     Bit 1:      Bluetooth controlled by switch
-     Bit 2:      WWAN controlled by switch
-     Bits 3-6:   Reserved
-     Bit 7:      Wireless switch config locked
-     Bit 8:      Wifi locator enabled
-     Bits 9-14:  Reserved
-     Bit 15:     Wifi locator setting locked
-     Bits 16-31: Reserved
-*/
-
-static int dell_rfkill_set(void *data, bool blocked)
-{
-       int disable = blocked ? 1 : 0;
-       unsigned long radio = (unsigned long)data;
-       int hwswitch_bit = (unsigned long)data - 1;
-       int ret = 0;
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-
-       /* If the hardware switch controls this radio, and the hardware
-          switch is disabled, don't allow changing the software state */
-       if ((hwswitch_state & BIT(hwswitch_bit)) &&
-           !(buffer->output[1] & BIT(16))) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       buffer->input[0] = (1 | (radio<<8) | (disable << 16));
-       dell_send_request(buffer, 17, 11);
-
-out:
-       release_buffer();
-       return ret;
-}
-
-static void dell_rfkill_query(struct rfkill *rfkill, void *data)
-{
-       int status;
-       int bit = (unsigned long)data + 16;
-       int hwswitch_bit = (unsigned long)data - 1;
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-       status = buffer->output[1];
-       release_buffer();
-
-       rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
-
-       if (hwswitch_state & (BIT(hwswitch_bit)))
-               rfkill_set_hw_state(rfkill, !(status & BIT(16)));
-}
-
-static const struct rfkill_ops dell_rfkill_ops = {
-       .set_block = dell_rfkill_set,
-       .query = dell_rfkill_query,
-};
-
 static struct dentry *dell_laptop_dir;
 
 static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
        .release = single_release,
 };
 
-static void dell_update_rfkill(struct work_struct *ignored)
-{
-       if (wifi_rfkill)
-               dell_rfkill_query(wifi_rfkill, (void *)1);
-       if (bluetooth_rfkill)
-               dell_rfkill_query(bluetooth_rfkill, (void *)2);
-       if (wwan_rfkill)
-               dell_rfkill_query(wwan_rfkill, (void *)3);
-}
-static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-
-
-static int __init dell_setup_rfkill(void)
-{
-       int status;
-       int ret;
-
-       if (dmi_check_system(dell_blacklist)) {
-               pr_info("Blacklisted hardware detected - not enabling rfkill\n");
-               return 0;
-       }
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-       status = buffer->output[1];
-       buffer->input[0] = 0x2;
-       dell_send_request(buffer, 17, 11);
-       hwswitch_state = buffer->output[1];
-       release_buffer();
-
-       if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
-               wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
-                                          RFKILL_TYPE_WLAN,
-                                          &dell_rfkill_ops, (void *) 1);
-               if (!wifi_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_wifi;
-               }
-               ret = rfkill_register(wifi_rfkill);
-               if (ret)
-                       goto err_wifi;
-       }
-
-       if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
-               bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
-                                               &platform_device->dev,
-                                               RFKILL_TYPE_BLUETOOTH,
-                                               &dell_rfkill_ops, (void *) 2);
-               if (!bluetooth_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_bluetooth;
-               }
-               ret = rfkill_register(bluetooth_rfkill);
-               if (ret)
-                       goto err_bluetooth;
-       }
-
-       if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
-               wwan_rfkill = rfkill_alloc("dell-wwan",
-                                          &platform_device->dev,
-                                          RFKILL_TYPE_WWAN,
-                                          &dell_rfkill_ops, (void *) 3);
-               if (!wwan_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_wwan;
-               }
-               ret = rfkill_register(wwan_rfkill);
-               if (ret)
-                       goto err_wwan;
-       }
-
-       return 0;
-err_wwan:
-       rfkill_destroy(wwan_rfkill);
-       if (bluetooth_rfkill)
-               rfkill_unregister(bluetooth_rfkill);
-err_bluetooth:
-       rfkill_destroy(bluetooth_rfkill);
-       if (wifi_rfkill)
-               rfkill_unregister(wifi_rfkill);
-err_wifi:
-       rfkill_destroy(wifi_rfkill);
-
-       return ret;
-}
-
-static void dell_cleanup_rfkill(void)
-{
-       if (wifi_rfkill) {
-               rfkill_unregister(wifi_rfkill);
-               rfkill_destroy(wifi_rfkill);
-       }
-       if (bluetooth_rfkill) {
-               rfkill_unregister(bluetooth_rfkill);
-               rfkill_destroy(bluetooth_rfkill);
-       }
-       if (wwan_rfkill) {
-               rfkill_unregister(wwan_rfkill);
-               rfkill_destroy(wwan_rfkill);
-       }
-}
-
 static int dell_send_intensity(struct backlight_device *bd)
 {
        int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
        led_classdev_unregister(&touchpad_led);
 }
 
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
-                             struct serio *port)
-{
-       static bool extended;
-
-       if (str & 0x20)
-               return false;
-
-       if (unlikely(data == 0xe0)) {
-               extended = true;
-               return false;
-       } else if (unlikely(extended)) {
-               switch (data) {
-               case 0x8:
-                       schedule_delayed_work(&dell_rfkill_work,
-                                             round_jiffies_relative(HZ));
-                       break;
-               }
-               extended = false;
-       }
-
-       return false;
-}
-
 static int __init dell_init(void)
 {
        int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
                goto fail_buffer;
        buffer = page_address(bufferpage);
 
-       ret = dell_setup_rfkill();
-
-       if (ret) {
-               pr_warn("Unable to setup rfkill\n");
-               goto fail_rfkill;
-       }
-
-       ret = i8042_install_filter(dell_laptop_i8042_filter);
-       if (ret) {
-               pr_warn("Unable to install key filter\n");
-               goto fail_filter;
-       }
-
        if (quirks && quirks->touchpad_led)
                touchpad_led_init(&platform_device->dev);
 
        dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
-       if (dell_laptop_dir != NULL)
-               debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
-                                   &dell_debugfs_fops);
 
 #ifdef CONFIG_ACPI
        /* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
        return 0;
 
 fail_backlight:
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
-       dell_cleanup_rfkill();
-fail_rfkill:
        free_page((unsigned long)bufferpage);
 fail_buffer:
        platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
        debugfs_remove_recursive(dell_laptop_dir);
        if (quirks && quirks->touchpad_led)
                touchpad_led_exit();
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
        backlight_device_unregister(dell_backlight_device);
-       dell_cleanup_rfkill();
        if (platform_device) {
                platform_device_unregister(platform_device);
                platform_driver_unregister(&platform_driver);
index 580d80a73c3adab888ba7fb07adb323c4c48883a..da267eae8ba85bc151fef55a0255a9d850429141 100644 (file)
@@ -16,6 +16,8 @@
  * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -34,7 +36,8 @@
 #define ACPI_FUJITSU_CLASS "fujitsu"
 
 #define INVERT_TABLET_MODE_BIT      0x01
-#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+#define INVERT_DOCK_STATE_BIT       0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
 
 #define KEYMAP_LEN 16
 
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
        state = fujitsu_read_register(0xdd);
 
        dock = state & 0x02;
+       if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+               dock = !dock;
 
        if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
                tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
        input_set_capability(idev, EV_SW, SW_DOCK);
        input_set_capability(idev, EV_SW, SW_TABLET_MODE);
 
-       input_set_capability(idev, EV_SW, SW_DOCK);
-       input_set_capability(idev, EV_SW, SW_TABLET_MODE);
-
        error = input_register_device(idev);
        if (error) {
                input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
 {
-       printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+       pr_info("%s\n", dmi->ident);
        memcpy(fujitsu.config.keymap, dmi->driver_data,
                        sizeof(fujitsu.config.keymap));
+}
+
+static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+       fujitsu_dmi_common(dmi);
+       fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
        return 1;
 }
 
 static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
 {
-       fujitsu_dmi_default(dmi);
+       fujitsu_dmi_common(dmi);
        fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
-       fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+       fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
        return 1;
 }
 
 static struct dmi_system_id dmi_ids[] __initconst = {
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu Siemens P/T Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Lifebook_Tseries
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu Lifebook T Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Stylistic_Tseries
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu LifeBook U810",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Stylistic_ST5xxx
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Unknown (using defaults)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit);
 MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
 MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("2.4");
+MODULE_VERSION("2.5");
 
 MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
index 7387f97a294194b607115096510f2119d76902d0..24a3ae065f1b9ffce58f6c6aaa595f2384c11bac 100644 (file)
@@ -2,7 +2,7 @@
  * hdaps.c - driver for IBM's Hard Drive Active Protection System
  *
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
  * starting with the R40, T41, and X40.  It provides a basic two-axis
index e2faa3cbb792e3f154d83db97ca0f9f45ed44402..387183a2d6ddd4405c8430137e8da3c4595b5407 100644 (file)
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                           RFKILL_TYPE_WLAN,
                                           &hp_wmi_rfkill_ops,
                                           (void *) HPWMI_WIFI);
+               if (!wifi_rfkill)
+                       return -ENOMEM;
                rfkill_init_sw_state(wifi_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WIFI));
                rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                                RFKILL_TYPE_BLUETOOTH,
                                                &hp_wmi_rfkill_ops,
                                                (void *) HPWMI_BLUETOOTH);
+               if (!bluetooth_rfkill) {
+                       err = -ENOMEM;
+                       goto register_wifi_error;
+               }
                rfkill_init_sw_state(bluetooth_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
                rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                           RFKILL_TYPE_WWAN,
                                           &hp_wmi_rfkill_ops,
                                           (void *) HPWMI_WWAN);
+               if (!wwan_rfkill) {
+                       err = -ENOMEM;
+                       goto register_bluetooth_error;
+               }
                rfkill_init_sw_state(wwan_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WWAN));
                rfkill_set_hw_state(wwan_rfkill,
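
The hp-wmi hunks above add the missing NULL checks after rfkill_alloc() and unwind whatever was already registered when a later allocation fails. The general check-and-unwind shape, reduced to a userspace sketch with stand-in names (demo_open, demo_close):

#include <stdlib.h>

struct demo_dev {
	void *a, *b, *c;
};

static int demo_open(struct demo_dev *d)
{
	d->a = malloc(16);
	if (!d->a)
		goto err_a;
	d->b = malloc(16);
	if (!d->b)
		goto err_b;
	d->c = malloc(16);
	if (!d->c)
		goto err_c;
	return 0;

err_c:
	free(d->b);	/* undo the steps that succeeded, newest first */
err_b:
	free(d->a);
err_a:
	return -1;
}

static void demo_close(struct demo_dev *d)
{
	free(d->c);
	free(d->b);
	free(d->a);
}

int main(void)
{
	struct demo_dev d = { 0 };

	if (demo_open(&d) == 0)
		demo_close(&d);
	return 0;
}
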
index ac902f7a9baad76cbe4fb5982285044fe81a97e5..17f6dfd8dbfb093a332d7d9d9ebd572df9b850e7 100644 (file)
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
 /*
  * debugfs
  */
-#define DEBUGFS_EVENT_LEN (4096)
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
        unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
        node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
                                   &debugfs_status_fops);
        if (!node) {
-               pr_err("failed to create event in debugfs");
+               pr_err("failed to create status in debugfs");
                goto errout;
        }
 
@@ -695,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
 static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
 {
        int ret, i;
-       unsigned long cfg;
+       int cfg;
        struct ideapad_private *priv;
 
-       if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+       if (read_method_int(adevice->handle, "_CFG", &cfg))
                return -ENODEV;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -722,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
                goto input_failed;
 
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
-               if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+               if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(adevice, i);
                else
                        priv->rfk[i] = NULL;
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
                        case 9:
                                ideapad_sync_rfk_state(priv);
                                break;
+                       case 13:
+                       case 6:
+                               ideapad_input_report(priv, vpc_bit);
+                               break;
                        case 4:
                                ideapad_backlight_notify_brightness(priv);
                                break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
                                ideapad_backlight_notify_power(priv);
                                break;
                        default:
-                               ideapad_input_report(priv, vpc_bit);
+                               pr_info("Unknown event: %lu\n", vpc_bit);
                        }
                }
        }
index 0ffdb3cde2bbc3ff569fee774dcd842333585031..9af4257d49018443b19e447455969787b480a944 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/string.h>
 #include <linux/tick.h>
 #include <linux/timer.h>
+#include <linux/dmi.h>
 #include <drm/i915_drm.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, ips_id_table);
 
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+       pr_info("Blacklisted intel_ips for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+       {
+               .callback = ips_blacklist_callback,
+               .ident = "HP ProBook",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+               },
+       },
+       { }     /* terminating entry */
+};
+
 static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        u16 htshi, trc, trc_required_mask;
        u8 tse;
 
+       if (dmi_check_system(ips_blacklist))
+               return -ENODEV;
+
        ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
        if (!ips)
                return -ENOMEM;
index 8a51795aa02a07bbc0d60b6267686ca4f54dbe9b..d456ff0c73b73614339d4109c1bc282037c64e08 100644 (file)
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
                 "(default: 0)");
 
 static void sony_nc_kbd_backlight_resume(void);
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+               unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+               unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+static void sony_nc_thermal_resume(void);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+                                 unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
 
 enum sony_nc_rfkill {
        SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
 static int sony_rfkill_handle;
 static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
 static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+               unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
 static void sony_nc_rfkill_update(void);
 
 /*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
 
 /*
  * acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
  */
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+               u64 *value)
 {
-       struct acpi_buffer output;
-       union acpi_object out_obj;
+       union acpi_object *result = NULL;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
+       if (value) {
+               struct acpi_object_list params;
+               union acpi_object in;
+               in.type = ACPI_TYPE_INTEGER;
+               in.integer.value = *value;
+               params.count = 1;
+               params.pointer = &in;
+               status = acpi_evaluate_object(handle, method, &params, &output);
+               dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+                               (unsigned int)(*value >> 32),
+                               (unsigned int)*value & 0xffffffff);
+       } else {
+               status = acpi_evaluate_object(handle, method, NULL, &output);
+               dprintk("__call_snc_method: [%s]\n", method);
+       }
 
-       status = acpi_evaluate_object(handle, name, NULL, &output);
-       if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
-               *result = out_obj.integer.value;
-               return 0;
+       if (ACPI_FAILURE(status)) {
+               pr_err("Failed to evaluate [%s]\n", method);
+               return NULL;
        }
 
-       pr_warn("acpi_callreadfunc failed\n");
+       result = (union acpi_object *) output.pointer;
+       if (!result)
+               dprintk("No return object [%s]\n", method);
 
-       return -1;
+       return result;
 }
 
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
-                           int *result)
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+               int *result)
 {
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       struct acpi_buffer output;
-       union acpi_object out_obj;
-       acpi_status status;
-
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = value;
+       union acpi_object *object = NULL;
+       if (value) {
+               u64 v = *value;
+               object = __call_snc_method(handle, name, &v);
+       } else
+               object = __call_snc_method(handle, name, NULL);
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
+       if (!object)
+               return -EINVAL;
 
-       status = acpi_evaluate_object(handle, name, &params, &output);
-       if (status == AE_OK) {
-               if (result != NULL) {
-                       if (out_obj.type != ACPI_TYPE_INTEGER) {
-                               pr_warn("acpi_evaluate_object bad return type\n");
-                               return -1;
-                       }
-                       *result = out_obj.integer.value;
-               }
-               return 0;
+       if (object->type != ACPI_TYPE_INTEGER) {
+               pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+                               ACPI_TYPE_INTEGER, object->type);
+               kfree(object);
+               return -EINVAL;
        }
 
-       pr_warn("acpi_evaluate_object failed\n");
+       if (result)
+               *result = object->integer.value;
+
+       kfree(object);
+       return 0;
+}
+
+#define MIN(a, b)      (a > b ? b : a)
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+               void *buffer, size_t buflen)
+{
+       size_t len = len;
+       union acpi_object *object = __call_snc_method(handle, name, value);
+
+       if (!object)
+               return -EINVAL;
+
+       if (object->type == ACPI_TYPE_BUFFER)
+               len = MIN(buflen, object->buffer.length);
+
+       else if (object->type == ACPI_TYPE_INTEGER)
+               len = MIN(buflen, sizeof(object->integer.value));
+
+       else {
+               pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+                               ACPI_TYPE_BUFFER, object->type);
+               kfree(object);
+               return -EINVAL;
+       }
 
-       return -1;
+       memcpy(buffer, object->buffer.pointer, len);
+       kfree(object);
+       return 0;
 }
 
 struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
 
 static int sony_nc_handles_setup(struct platform_device *pd)
 {
-       int i;
-       int result;
+       int i, r, result, arg;
 
        handles = kzalloc(sizeof(*handles), GFP_KERNEL);
        if (!handles)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
-               if (!acpi_callsetfunc(sony_nc_acpi_handle,
-                                       "SN00", i + 0x20, &result)) {
+               arg = i + 0x20;
+               r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+                                       &result);
+               if (!r) {
                        dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
                                        result, i);
                        handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
        int i;
 
        /* not initialized yet, return early */
-       if (!handles)
-               return -1;
+       if (!handles || !handle)
+               return -EINVAL;
 
        for (i = 0; i < 0x10; i++) {
                if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
                }
        }
        dprintk("handle 0x%.4x not found\n", handle);
-       return -1;
+       return -EINVAL;
 }
 
 static int sony_call_snc_handle(int handle, int argument, int *result)
 {
-       int ret = 0;
+       int arg, ret = 0;
        int offset = sony_find_snc_handle(handle);
 
        if (offset < 0)
-               return -1;
+               return offset;
 
-       ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
-                       result);
-       dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
-                       *result);
+       arg = offset | argument;
+       ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+       dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
        return ret;
 }
 
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
 static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *buffer)
 {
-       int value;
+       int value, ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
 
        if (!*item->acpiget)
                return -EIO;
 
-       if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+       ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+                               &value);
+       if (ret < 0)
                return -EIO;
 
        if (item->validate)
@@ -910,6 +974,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                               const char *buffer, size_t count)
 {
        int value;
+       int ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
 
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoint(buffer, 10, &value))
+               return -EINVAL;
 
        if (item->validate)
                value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (value < 0)
                return value;
 
-       if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+       ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+                              &value, NULL);
+       if (ret < 0)
                return -EIO;
+
        item->value = value;
        item->valid = 1;
        return count;
@@ -941,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
 struct sony_backlight_props {
        struct backlight_device *dev;
        int                     handle;
+       int                     cmd_base;
        u8                      offset;
        u8                      maxlvl;
 };
@@ -948,15 +1018,15 @@ struct sony_backlight_props sony_bl_props;
 
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
-       return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-                               bd->props.brightness + 1, NULL);
+       int arg = bd->props.brightness + 1;
+       return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
 }
 
 static int sony_backlight_get_brightness(struct backlight_device *bd)
 {
        int value;
 
-       if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+       if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
                return 0;
        /* brightness levels are 1-based, while backlight ones are 0-based */
        return value - 1;
@@ -968,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
        struct sony_backlight_props *sdev =
                (struct sony_backlight_props *)bl_get_data(bd);
 
-       sony_call_snc_handle(sdev->handle, 0x0200, &result);
+       sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
 
        return (result & 0xff) - sdev->offset;
 }
@@ -980,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
                (struct sony_backlight_props *)bl_get_data(bd);
 
        value = bd->props.brightness + sdev->offset;
-       if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+       if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+                               &result))
                return -EIO;
 
        return value;
@@ -1024,10 +1095,14 @@ static struct sony_nc_event sony_100_events[] = {
        { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x87, SONYPI_EVENT_FNKEY_F7 },
        { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+       { 0x88, SONYPI_EVENT_FNKEY_F8 },
+       { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x89, SONYPI_EVENT_FNKEY_F9 },
        { 0x09, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x8A, SONYPI_EVENT_FNKEY_F10 },
        { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+       { 0x8B, SONYPI_EVENT_FNKEY_F11 },
+       { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x8C, SONYPI_EVENT_FNKEY_F12 },
        { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1138,139 @@ static struct sony_nc_event sony_127_events[] = {
        { 0, 0 },
 };
 
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+       int ret = -EINVAL;
+       unsigned int result = 0;
+       struct sony_nc_event *key_event;
+
+       if (sony_call_snc_handle(handle, 0x200, &result)) {
+               dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+                               event);
+               return -EINVAL;
+       }
+
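+       /* only the low byte of the SN07 result carries the key code */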
+       result &= 0xFF;
+
+       if (handle == 0x0100)
+               key_event = sony_100_events;
+       else
+               key_event = sony_127_events;
+
+       for (; key_event->data; key_event++) {
+               if (key_event->data == result) {
+                       ret = key_event->event;
+                       break;
+               }
+       }
+
+       if (!key_event->data)
+               pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+                               event, result, handle);
+
+       return ret;
+}
+
 /*
  * ACPI callbacks
  */
+enum event_types {
+       HOTKEY = 1,
+       KILLSWITCH,
+       GFX_SWITCH
+};
 static void sony_nc_notify(struct acpi_device *device, u32 event)
 {
-       u32 ev = event;
+       u32 real_ev = event;
+       u8 ev_type = 0;
+       dprintk("sony_nc_notify, event: 0x%.2x\n", event);
+
+       if (event >= 0x90) {
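+               /* new-style event: the event code selects an offset
+                * in the capabilities list
+                */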
+               unsigned int result = 0;
+               unsigned int arg = 0;
+               unsigned int handle = 0;
+               unsigned int offset = event - 0x90;
+
+               if (offset >= ARRAY_SIZE(handles->cap)) {
+                       pr_err("Event 0x%x outside of capabilities list\n",
+                                       event);
+                       return;
+               }
+               handle = handles->cap[offset];
+
+               /* list of handles known for generating events */
+               switch (handle) {
+               /* hotkey event */
+               case 0x0100:
+               case 0x0127:
+                       ev_type = HOTKEY;
+                       real_ev = sony_nc_hotkeys_decode(event, handle);
+
+                       if (real_ev > 0)
+                               sony_laptop_report_input_event(real_ev);
+                       else
+                               /* restore the original event for reporting */
+                               real_ev = event;
 
-       if (ev >= 0x90) {
-               /* New-style event */
-               int result;
-               int key_handle = 0;
-               ev -= 0x90;
-
-               if (sony_find_snc_handle(0x100) == ev)
-                       key_handle = 0x100;
-               if (sony_find_snc_handle(0x127) == ev)
-                       key_handle = 0x127;
-
-               if (key_handle) {
-                       struct sony_nc_event *key_event;
-
-                       if (sony_call_snc_handle(key_handle, 0x200, &result)) {
-                               dprintk("sony_nc_notify, unable to decode"
-                                       " event 0x%.2x 0x%.2x\n", key_handle,
-                                       ev);
-                               /* restore the original event */
-                               ev = event;
-                       } else {
-                               ev = result & 0xFF;
-
-                               if (key_handle == 0x100)
-                                       key_event = sony_100_events;
-                               else
-                                       key_event = sony_127_events;
-
-                               for (; key_event->data; key_event++) {
-                                       if (key_event->data == ev) {
-                                               ev = key_event->event;
-                                               break;
-                                       }
-                               }
+                       break;
 
-                               if (!key_event->data)
-                                       pr_info("Unknown event: 0x%x 0x%x\n",
-                                               key_handle, ev);
-                               else
-                                       sony_laptop_report_input_event(ev);
-                       }
-               } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
-                       sony_nc_rfkill_update();
-                       return;
+               /* wlan switch */
+               case 0x0124:
+               case 0x0135:
+                       /* events on this handle are reported when the
+                        * switch changes position or for battery
+                        * events. We'll notify both of them but only
+                        * update the rfkill device status when the
+                        * switch is moved.
+                        */
+                       ev_type = KILLSWITCH;
+                       sony_call_snc_handle(handle, 0x0100, &result);
+                       real_ev = result & 0x03;
+
+                       /* hw switch event */
+                       if (real_ev == 1)
+                               sony_nc_rfkill_update();
+
+                       break;
+
+               case 0x0128:
+               case 0x0146:
+                       /* Hybrid GFX switching */
+                       sony_call_snc_handle(handle, 0x0000, &result);
+                       dprintk("GFX switch event received (reason: %s)\n",
+                                       (result & 0x01) ?
+                                       "switch change" : "unknown");
+
+                       /* verify the switch state
+                        * 1: discrete GFX
+                        * 0: integrated GFX
+                        */
+                       sony_call_snc_handle(handle, 0x0100, &result);
+
+                       ev_type = GFX_SWITCH;
+                       real_ev = result & 0xff;
+                       break;
+
+               default:
+                       dprintk("Unknown event 0x%x for handle 0x%x\n",
+                                       event, handle);
+                       break;
                }
-       } else
-               sony_laptop_report_input_event(ev);
 
-       dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
-       acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+               /* clear the event (and the event reason when present) */
+               arg = 1 << offset;
+               sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+       } else {
+               /* old style event */
+               ev_type = HOTKEY;
+               sony_laptop_report_input_event(real_ev);
+       }
+
+       acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
+
+       acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+                       dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
 }
 
 static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1291,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
 /*
  * ACPI device
  */
-static int sony_nc_function_setup(struct acpi_device *device)
+static void sony_nc_function_setup(struct acpi_device *device,
+               struct platform_device *pf_device)
 {
-       int result;
+       unsigned int i, result, bitmask, arg;
+
+       if (!handles)
+               return;
+
+       /* setup found handles here */
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+               unsigned int handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               dprintk("setting up handle 0x%.4x\n", handle);
+
+               switch (handle) {
+               case 0x0100:
+               case 0x0101:
+               case 0x0127:
+                       /* setup hotkeys */
+                       sony_call_snc_handle(handle, 0, &result);
+                       break;
+               case 0x0102:
+                       /* setup hotkeys */
+                       sony_call_snc_handle(handle, 0x100, &result);
+                       break;
+               case 0x0105:
+               case 0x0148:
+                       /* touchpad enable/disable */
+                       result = sony_nc_touchpad_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up touchpad control function (%d)\n",
+                                               result);
+                       break;
+               case 0x0115:
+               case 0x0136:
+               case 0x013f:
+                       result = sony_nc_battery_care_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up battery care function (%d)\n",
+                                               result);
+                       break;
+               case 0x0119:
+                       result = sony_nc_lid_resume_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up lid resume function (%d)\n",
+                                               result);
+                       break;
+               case 0x0122:
+                       result = sony_nc_thermal_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up thermal profile function (%d)\n",
+                                               result);
+                       break;
+               case 0x0131:
+                       result = sony_nc_highspeed_charging_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up high speed charging function (%d)\n",
+                                      result);
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       result = sony_nc_rfkill_setup(device, handle);
+                       if (result)
+                               pr_err("couldn't set up rfkill support (%d)\n",
+                                               result);
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       result = sony_nc_kbd_backlight_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up keyboard backlight function (%d)\n",
+                                               result);
+                       break;
+               default:
+                       continue;
+               }
+       }
 
        /* Enable all events */
-       acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result);
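+       /* SN00 called with 0x10 reports the bitmask of supported
+        * events that SN02 then enables
+        */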
+       arg = 0x10;
+       if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+               sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+                               &result);
+}
 
-       /* Setup hotkeys */
-       sony_call_snc_handle(0x0100, 0, &result);
-       sony_call_snc_handle(0x0101, 0, &result);
-       sony_call_snc_handle(0x0102, 0x100, &result);
-       sony_call_snc_handle(0x0127, 0, &result);
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+       unsigned int i, result, bitmask, handle;
 
-       return 0;
+       /* get enabled events and disable them */
+       sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+       sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
+
+       /* cleanup handles here */
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+               handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               switch (handle) {
+               case 0x0105:
+               case 0x0148:
+                       sony_nc_touchpad_cleanup(pd);
+                       break;
+               case 0x0115:
+               case 0x0136:
+               case 0x013f:
+                       sony_nc_battery_care_cleanup(pd);
+                       break;
+               case 0x0119:
+                       sony_nc_lid_resume_cleanup(pd);
+                       break;
+               case 0x0122:
+                       sony_nc_thermal_cleanup(pd);
+                       break;
+               case 0x0131:
+                       sony_nc_highspeed_charging_cleanup(pd);
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       sony_nc_rfkill_cleanup();
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       sony_nc_kbd_backlight_cleanup(pd);
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       /* finally cleanup the handles list */
+       sony_nc_handles_cleanup(pd);
+}
+
+static void sony_nc_function_resume(void)
+{
+       unsigned int i, result, bitmask, arg;
+
+       dprintk("Resuming SNC device\n");
+
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+               unsigned int handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               switch (handle) {
+               case 0x0100:
+               case 0x0101:
+               case 0x0127:
+                       /* re-enable hotkeys */
+                       sony_call_snc_handle(handle, 0, &result);
+                       break;
+               case 0x0102:
+                       /* re-enable hotkeys */
+                       sony_call_snc_handle(handle, 0x100, &result);
+                       break;
+               case 0x0122:
+                       sony_nc_thermal_resume();
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       sony_nc_rfkill_update();
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       sony_nc_kbd_backlight_resume();
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       /* Enable all events */
+       arg = 0x10;
+       if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+               sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+                               &result);
 }
 
 static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1487,8 @@ static int sony_nc_resume(struct acpi_device *device)
 
                if (!item->valid)
                        continue;
-               ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
-                                      item->value, NULL);
+               ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+                                      &item->value, NULL);
                if (ret < 0) {
                        pr_err("%s: %d\n", __func__, ret);
                        break;
@@ -1176,21 +1497,14 @@ static int sony_nc_resume(struct acpi_device *device)
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+               int arg = 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle))) {
-               dprintk("Doing SNC setup\n");
-               sony_nc_function_setup(device);
-       }
-
-       /* re-read rfkill state */
-       sony_nc_rfkill_update();
-
-       /* restore kbd backlight states */
-       sony_nc_kbd_backlight_resume();
+                                        &handle)))
+               sony_nc_function_resume();
 
        return 0;
 }
@@ -1213,7 +1527,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
        int argument = sony_rfkill_address[(long) data] + 0x100;
 
        if (!blocked)
-               argument |= 0xff0000;
+               argument |= 0x030000;
 
        return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
 }
@@ -1230,7 +1544,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        enum rfkill_type type;
        const char *name;
        int result;
-       bool hwblock;
+       bool hwblock, swblock;
 
        switch (nc_type) {
        case SONY_WIFI:
@@ -1258,8 +1572,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        if (!rfk)
                return -ENOMEM;
 
-       sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+       if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+               rfkill_destroy(rfk);
+               return -EIO;
+       }
        hwblock = !(result & 0x1);
+
+       if (sony_call_snc_handle(sony_rfkill_handle,
+                               sony_rfkill_address[nc_type],
+                               &result) < 0) {
+               rfkill_destroy(rfk);
+               return -EIO;
+       }
+       swblock = !(result & 0x2);
+
+       rfkill_init_sw_state(rfk, swblock);
        rfkill_set_hw_state(rfk, hwblock);
 
        err = rfkill_register(rfk);
@@ -1295,101 +1622,79 @@ static void sony_nc_rfkill_update(void)
 
                sony_call_snc_handle(sony_rfkill_handle, argument, &result);
                rfkill_set_states(sony_rfkill_devices[i],
-                                 !(result & 0xf), false);
+                                 !(result & 0x2), false);
        }
 }
 
-static void sony_nc_rfkill_setup(struct acpi_device *device)
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+               unsigned int handle)
 {
-       int offset;
-       u8 dev_code, i;
-       acpi_status status;
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       union acpi_object *device_enum;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-       offset = sony_find_snc_handle(0x124);
-       if (offset == -1) {
-               offset = sony_find_snc_handle(0x135);
-               if (offset == -1)
-                       return;
-               else
-                       sony_rfkill_handle = 0x135;
-       } else
-               sony_rfkill_handle = 0x124;
-       dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
-
-       /* need to read the whole buffer returned by the acpi call to SN06
-        * here otherwise we may miss some features
-        */
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = offset;
-       status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-                       &buffer);
-       if (ACPI_FAILURE(status)) {
-               dprintk("Radio device enumeration failed\n");
-               return;
-       }
-
-       device_enum = (union acpi_object *) buffer.pointer;
-       if (!device_enum) {
-               pr_err("No SN06 return object\n");
-               goto out_no_enum;
-       }
-       if (device_enum->type != ACPI_TYPE_BUFFER) {
-               pr_err("Invalid SN06 return object 0x%.2x\n",
-                      device_enum->type);
-               goto out_no_enum;
-       }
+       u64 offset;
+       int i;
+       unsigned char buffer[32] = { 0 };
 
-       /* the buffer is filled with magic numbers describing the devices
-        * available, 0xff terminates the enumeration
+       offset = sony_find_snc_handle(handle);
+       sony_rfkill_handle = handle;
+
+       i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+                       32);
+       if (i < 0)
+               return i;
+
+       /* The buffer is filled with magic numbers describing the devices
+        * available, 0xff terminates the enumeration.
+        * Known codes:
+        *      0x00 WLAN
+        *      0x10 BLUETOOTH
+        *      0x20 WWAN GPRS-EDGE
+        *      0x21 WWAN HSDPA
+        *      0x22 WWAN EV-DO
+        *      0x23 WWAN GPS
+        *      0x25 Gobi WWAN no GPS
+        *      0x26 Gobi WWAN + GPS
+        *      0x28 Gobi WWAN no GPS
+        *      0x29 Gobi WWAN + GPS
+        *      0x30 WIMAX
+        *      0x50 Gobi WWAN no GPS
+        *      0x51 Gobi WWAN + GPS
+        *      0x70 no SIM card slot
+        *      0x71 SIM card slot
         */
-       for (i = 0; i < device_enum->buffer.length; i++) {
+       for (i = 0; i < ARRAY_SIZE(buffer); i++) {
 
-               dev_code = *(device_enum->buffer.pointer + i);
-               if (dev_code == 0xff)
+               if (buffer[i] == 0xff)
                        break;
 
-               dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+               dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
 
-               if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+               if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
                        sony_nc_setup_rfkill(device, SONY_WIFI);
 
-               if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+               if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
                        sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
 
-               if ((0xf0 & dev_code) == 0x20 &&
+               if (((0xf0 & buffer[i]) == 0x20 ||
+                                       (0xf0 & buffer[i]) == 0x50) &&
                                !sony_rfkill_devices[SONY_WWAN])
                        sony_nc_setup_rfkill(device, SONY_WWAN);
 
-               if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+               if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
                        sony_nc_setup_rfkill(device, SONY_WIMAX);
        }
-
-out_no_enum:
-       kfree(buffer.pointer);
-       return;
+       return 0;
 }
 
 /* Keyboard backlight feature */
-#define KBDBL_HANDLER  0x137
-#define KBDBL_PRESENT  0xB00
-#define        SET_MODE        0xC00
-#define SET_STATE      0xD00
-#define SET_TIMEOUT    0xE00
-
 struct kbd_backlight {
-       int mode;
-       int timeout;
+       unsigned int handle;
+       unsigned int base;
+       unsigned int mode;
+       unsigned int timeout;
        struct device_attribute mode_attr;
        struct device_attribute timeout_attr;
 };
 
-static struct kbd_backlight *kbdbl_handle;
+static struct kbd_backlight *kbdbl_ctl;
 
 static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
 {
@@ -1398,15 +1703,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
        if (value > 1)
                return -EINVAL;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER,
-                               (value << 0x10) | SET_MODE, &result))
+       if (sony_call_snc_handle(kbdbl_ctl->handle,
+                               (value << 0x10) | (kbdbl_ctl->base), &result))
                return -EIO;
 
        /* Try to turn the light on/off immediately */
-       sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
-                       &result);
+       sony_call_snc_handle(kbdbl_ctl->handle,
+                       (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
 
-       kbdbl_handle->mode = value;
+       kbdbl_ctl->mode = value;
 
        return 0;
 }
@@ -1421,7 +1726,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (strict_strtoul(buffer, 10, &value))
+       if (kstrtoul(buffer, 10, &value))
                return -EINVAL;
 
        ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1740,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
        return count;
 }
 
@@ -1446,11 +1751,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
        if (value > 3)
                return -EINVAL;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER,
-                               (value << 0x10) | SET_TIMEOUT, &result))
+       if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+                               (kbdbl_ctl->base + 0x200), &result))
                return -EIO;
 
-       kbdbl_handle->timeout = value;
+       kbdbl_ctl->timeout = value;
 
        return 0;
 }
@@ -1465,7 +1770,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (strict_strtoul(buffer, 10, &value))
+       if (kstrtoul(buffer, 10, &value))
                return -EINVAL;
 
        ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1784,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
        return count;
 }
 
-static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+               unsigned int handle)
 {
        int result;
+       int ret = 0;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
-               return 0;
-       if (!(result & 0x02))
+       /* verify the kbd backlight presence, as these handles are not used
+        * exclusively for the keyboard backlight
+        */
+       ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
+                       &result);
+       if (ret)
+               return ret;
+
+       if ((handle == 0x0137 && !(result & 0x02)) ||
+                       !(result & 0x01)) {
+               dprintk("no backlight keyboard found\n");
                return 0;
+       }
 
-       kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
-       if (!kbdbl_handle)
+       kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+       if (!kbdbl_ctl)
                return -ENOMEM;
 
-       sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
-       kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
-       kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
-       kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
-       kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+       kbdbl_ctl->handle = handle;
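+       /* command bases: base sets the mode, base + 0x100 the state,
+        * base + 0x200 the timeout
+        */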
+       if (handle == 0x0137)
+               kbdbl_ctl->base = 0x0C00;
+       else
+               kbdbl_ctl->base = 0x4000;
+
+       sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+       kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+       kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+       kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+       kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
 
-       sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
-       kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
-       kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
-       kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
-       kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+       sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+       kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+       kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+       kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
+       kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
 
-       if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
+       ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+       if (ret)
                goto outkzalloc;
 
-       if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
+       ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+       if (ret)
                goto outmode;
 
        __sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,134 +1844,737 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
        return 0;
 
 outmode:
-       device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
+       device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
 outkzalloc:
-       kfree(kbdbl_handle);
-       kbdbl_handle = NULL;
-       return -1;
+       kfree(kbdbl_ctl);
+       kbdbl_ctl = NULL;
+       return ret;
 }
 
-static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
-       if (kbdbl_handle) {
+       if (kbdbl_ctl) {
                int result;
 
-               device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
-               device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+               device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+               device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
 
                /* restore the default hw behaviour */
-               sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
-               sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               kbdbl_ctl->base | 0x10000, &result);
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               kbdbl_ctl->base + 0x200, &result);
 
-               kfree(kbdbl_handle);
+               kfree(kbdbl_ctl);
+               kbdbl_ctl = NULL;
        }
-       return 0;
 }
 
 static void sony_nc_kbd_backlight_resume(void)
 {
        int ignore = 0;
 
-       if (!kbdbl_handle)
+       if (!kbdbl_ctl)
                return;
 
-       if (kbdbl_handle->mode == 0)
-               sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
-
-       if (kbdbl_handle->timeout != 0)
-               sony_call_snc_handle(KBDBL_HANDLER,
-                               (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+       if (kbdbl_ctl->mode == 0)
+               sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
                                &ignore);
+
+       if (kbdbl_ctl->timeout != 0)
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               (kbdbl_ctl->base + 0x200) |
+                               (kbdbl_ctl->timeout << 0x10), &ignore);
 }
 
-static void sony_nc_backlight_ng_read_limits(int handle,
-               struct sony_backlight_props *props)
+struct battery_care_control {
+       struct device_attribute attrs[2];
+       unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
 {
-       int offset;
-       acpi_status status;
-       u8 brlvl, i;
-       u8 min = 0xff, max = 0x00;
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       union acpi_object *lvl_enum;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       unsigned int result, cmd;
+       unsigned long value;
 
-       props->handle = handle;
-       props->offset = 0;
-       props->maxlvl = 0xff;
+       if (count > 31)
+               return -EINVAL;
 
-       offset = sony_find_snc_handle(handle);
-       if (offset < 0)
-               return;
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
 
-       /* try to read the boundaries from ACPI tables, if we fail the above
-        * defaults should be reasonable
+       /*  limit values (2 bits):
+        *  00 - none
+        *  01 - 80%
+        *  10 - 50%
+        *  11 - 100%
+        *
+        *  bit 0: 0 disable BCL, 1 enable BCL
+        *  bit 1: 1 tell to store the battery limit (see bits 6,7) too
+        *  bits 2,3: reserved
+        *  bits 4,5: store the limit into the EC
+        *  bits 6,7: store the limit into the battery
         */
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = offset;
-       status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-                       &buffer);
-       if (ACPI_FAILURE(status))
-               return;
+       cmd = 0;
 
-       lvl_enum = (union acpi_object *) buffer.pointer;
-       if (!lvl_enum) {
-               pr_err("No SN06 return object.");
-               return;
-       }
-       if (lvl_enum->type != ACPI_TYPE_BUFFER) {
-               pr_err("Invalid SN06 return object 0x%.2x\n",
-                      lvl_enum->type);
-               goto out_invalid;
-       }
+       if (value > 0) {
+               if (value <= 50)
+                       cmd = 0x20;
 
-       /* the buffer lists brightness levels available, brightness levels are
-        * from 0 to 8 in the array, other values are used by ALS control.
-        */
-       for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+               else if (value <= 80)
+                       cmd = 0x10;
 
-               brlvl = *(lvl_enum->buffer.pointer + i);
-               dprintk("Brightness level: %d\n", brlvl);
+               else if (value <= 100)
+                       cmd = 0x30;
 
-               if (!brlvl)
-                       break;
+               else
+                       return -EINVAL;
 
-               if (brlvl > max)
-                       max = brlvl;
-               if (brlvl < min)
-                       min = brlvl;
+               /*
+                * handle 0x0115 should allow storing on battery too;
+                * handle 0x0136 same as 0x0115 + health status;
+                * handle 0x013f, same as 0x0136 but no storing on the battery
+                */
+               if (bcare_ctl->handle != 0x013f)
+                       cmd = cmd | (cmd << 2);
+
+               cmd = (cmd | 0x1) << 0x10;
        }
-       props->offset = min;
-       props->maxlvl = max;
-       dprintk("Brightness levels: min=%d max=%d\n", props->offset,
-                       props->maxlvl);
 
-out_invalid:
-       kfree(buffer.pointer);
-       return;
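+       /* e.g. an 80% limit on handle 0x0115: cmd = 0x10, mirrored into the
+        * battery bits it becomes 0x50, shifted with the enable bit it is
+        * 0x510000, so the final SNC argument below is 0x510100
+        */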
+       if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
+               return -EIO;
+
+       return count;
 }
 
-static void sony_nc_backlight_setup(void)
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
 {
-       acpi_handle unused;
-       int max_brightness = 0;
-       const struct backlight_ops *ops = NULL;
-       struct backlight_properties props;
-
-       if (sony_find_snc_handle(0x12f) != -1) {
-               ops = &sony_backlight_ng_ops;
-               sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
-               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+       unsigned int result, status;
 
-       } else if (sony_find_snc_handle(0x137) != -1) {
-               ops = &sony_backlight_ng_ops;
-               sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
-               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+       if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+               return -EIO;
 
-       } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
-                                               &unused))) {
+       status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+       switch (status) {
+       case 1:
+               status = 80;
+               break;
+       case 2:
+               status = 50;
+               break;
+       case 3:
+               status = 100;
+               break;
+       default:
+               status = 0;
+               break;
+       }
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       ssize_t count = 0;
+       unsigned int health;
+
+       if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+               return -EIO;
+
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
+
+       return count;
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+               unsigned int handle)
+{
+       int ret = 0;
+
+       bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+       if (!bcare_ctl)
+               return -ENOMEM;
+
+       bcare_ctl->handle = handle;
+
+       sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+       bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+       bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+       bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+       bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+       ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+       if (ret)
+               goto outkzalloc;
+
+       /* 0x0115 is for models with no health reporting capability */
+       if (handle == 0x0115)
+               return 0;
+
+       sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+       bcare_ctl->attrs[1].attr.name = "battery_care_health";
+       bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+       bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+       ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+       if (ret)
+               goto outlimiter;
+
+       return 0;
+
+outlimiter:
+       device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+       kfree(bcare_ctl);
+       bcare_ctl = NULL;
+
+       return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+       if (bcare_ctl) {
+               device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+               if (bcare_ctl->handle != 0x0115)
+                       device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+               kfree(bcare_ctl);
+               bcare_ctl = NULL;
+       }
+}
+
+struct snc_thermal_ctrl {
+       unsigned int mode;
+       unsigned int profiles;
+       struct device_attribute mode_attr;
+       struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+       "balanced",
+       "silent",
+       "performance"
+};
+
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+       unsigned int result;
+
+       /* the thermal profile seems to be a two bit bitmask:
+        * lsb -> silent
+        * msb -> performance
+        * no bit set is the normal operation and is always valid
+        * Some vaio models only have "balanced" and "performance"
+        */
+       if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+               return -EIO;
+
+       th_handle->mode = mode;
+
+       return 0;
+}
+
+static int sony_nc_thermal_mode_get(void)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0122, 0x0100, &result))
+               return -EIO;
+
+       return result & 0xff;
+}
+
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       short cnt;
+       size_t idx = 0;
+
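+       /* cnt doubles as the profile bit: 1 = silent, 2 = performance,
+        * while balanced (0) is always listed
+        */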
+       for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+               if (!cnt || (th_handle->profiles & cnt))
+                       idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+                                       snc_thermal_profiles[cnt]);
+       }
+       idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+       return idx;
+}
+
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned short cmd;
+       size_t len = count;
+
+       if (count == 0)
+               return -EINVAL;
+
+       /* skip the newline if present */
+       if (buffer[len - 1] == '\n')
+               len--;
+
+       for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+               if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+                       break;
+
+       if (sony_nc_thermal_mode_set(cmd))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       ssize_t count = 0;
+       int mode = sony_nc_thermal_mode_get();
+
+       if (mode < 0)
+               return mode;
+
+       count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
+
+       return count;
+}
+
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+       int ret = 0;
+       th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+       if (!th_handle)
+               return -ENOMEM;
+
+       ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+       if (ret) {
+               pr_warn("couldn't to read the thermal profiles\n");
+               goto outkzalloc;
+       }
+
+       ret = sony_nc_thermal_mode_get();
+       if (ret < 0) {
+               pr_warn("couldn't to read the current thermal profile");
+               goto outkzalloc;
+       }
+       th_handle->mode = ret;
+
+       sysfs_attr_init(&th_handle->profiles_attr.attr);
+       th_handle->profiles_attr.attr.name = "thermal_profiles";
+       th_handle->profiles_attr.attr.mode = S_IRUGO;
+       th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+       sysfs_attr_init(&th_handle->mode_attr.attr);
+       th_handle->mode_attr.attr.name = "thermal_control";
+       th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+       th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+       th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+       ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+       if (ret)
+               goto outkzalloc;
+
+       ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+       if (ret)
+               goto outprofiles;
+
+       return 0;
+
+outprofiles:
+       device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+       kfree(th_handle);
+       th_handle = NULL;
+       return ret;
+}
+
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+       if (th_handle) {
+               device_remove_file(&pd->dev, &th_handle->profiles_attr);
+               device_remove_file(&pd->dev, &th_handle->mode_attr);
+               kfree(th_handle);
+               th_handle = NULL;
+       }
+}
+
+static void sony_nc_thermal_resume(void)
+{
+       unsigned int status = sony_nc_thermal_mode_get();
+
+       if (status != th_handle->mode)
+               sony_nc_thermal_mode_set(th_handle->mode);
+}
+
+/* resume on LID open */
+struct snc_lid_resume_control {
+       struct device_attribute attrs[3];
+       unsigned int status;
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buffer, size_t count)
+{
+       unsigned int result, pos;
+       unsigned long value;
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       /* the value we have to write to SNC is a bitmask:
+        * +--------------+
+        * | S3 | S4 | S5 |
+        * +--------------+
+        *   2    1    0
+        */
+       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+               pos = 2;
+       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+               pos = 1;
+       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+               pos = 0;
+       else
+               return -EINVAL;
+
+       if (value)
+               value = lid_ctl->status | (1 << pos);
+       else
+               value = lid_ctl->status & ~(1 << pos);
+
+       if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+               return -EIO;
+
+       lid_ctl->status = value;
+
+       return count;
+}
+
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+                                      struct device_attribute *attr, char *buffer)
+{
+       unsigned int pos;
+
+       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+               pos = 2;
+       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+               pos = 1;
+       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+               pos = 0;
+       else
+               return -EINVAL;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n",
+                       (lid_ctl->status >> pos) & 0x01);
+}
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd)
+{
+       unsigned int result;
+       int i;
+
+       if (sony_call_snc_handle(0x0119, 0x0000, &result))
+               return -EIO;
+
+       lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+       if (!lid_ctl)
+               return -ENOMEM;
+
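+       /* bits 2..0 report the current S3/S4/S5 resume-on-lid-open settings */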
+       lid_ctl->status = result & 0x7;
+
+       sysfs_attr_init(&lid_ctl->attrs[0].attr);
+       lid_ctl->attrs[0].attr.name = "lid_resume_S3";
+       lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+
+       sysfs_attr_init(&lid_ctl->attrs[1].attr);
+       lid_ctl->attrs[1].attr.name = "lid_resume_S4";
+       lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+
+       sysfs_attr_init(&lid_ctl->attrs[2].attr);
+       lid_ctl->attrs[2].attr.name = "lid_resume_S5";
+       lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
+
+       for (i = 0; i < 3; i++) {
+               result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+               if (result)
+                       goto liderror;
+       }
+
+       return 0;
+
+liderror:
+       for (; i > 0; i--)
+               device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+       kfree(lid_ctl);
+       lid_ctl = NULL;
+
+       return result;
+}
+
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+       int i;
+
+       if (lid_ctl) {
+               for (i = 0; i < 3; i++)
+                       device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+               kfree(lid_ctl);
+               lid_ctl = NULL;
+       }
+}
+
+/* High speed charging function */
+static struct device_attribute *hsc_handle;
+
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
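+       /* bit 16 carries the on/off value, sub-command 0x0200 applies it */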
+       if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0131, 0x0100, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+               /* some models advertise the handle but have no implementation
+                * for it
+                */
+               pr_info("No High Speed Charging capability found\n");
+               return 0;
+       }
+
+       hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!hsc_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&hsc_handle->attr);
+       hsc_handle->attr.name = "battery_highspeed_charging";
+       hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+       hsc_handle->show = sony_nc_highspeed_charging_show;
+       hsc_handle->store = sony_nc_highspeed_charging_store;
+
+       result = device_create_file(&pd->dev, hsc_handle);
+       if (result) {
+               kfree(hsc_handle);
+               hsc_handle = NULL;
+               return result;
+       }
+
+       return 0;
+}
+
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+       if (hsc_handle) {
+               device_remove_file(&pd->dev, hsc_handle);
+               kfree(hsc_handle);
+               hsc_handle = NULL;
+       }
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+       struct device_attribute attr;
+       int handle;
+};
+static struct touchpad_control *tp_ctl;
+
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+               struct device_attribute *attr, const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       /* sysfs: 0 disabled, 1 enabled
+        * EC: 0 enabled, 1 disabled
+        */
+       if (sony_call_snc_handle(tp_ctl->handle,
+                               (!value << 0x10) | 0x100, &result))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+               return -EINVAL;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+}
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+               unsigned int handle)
+{
+       int ret = 0;
+
+       tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+       if (!tp_ctl)
+               return -ENOMEM;
+
+       tp_ctl->handle = handle;
+
+       sysfs_attr_init(&tp_ctl->attr.attr);
+       tp_ctl->attr.attr.name = "touchpad";
+       tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+       tp_ctl->attr.show = sony_nc_touchpad_show;
+       tp_ctl->attr.store = sony_nc_touchpad_store;
+
+       ret = device_create_file(&pd->dev, &tp_ctl->attr);
+       if (ret) {
+               kfree(tp_ctl);
+               tp_ctl = NULL;
+       }
+
+       return ret;
+}
+
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+       if (tp_ctl) {
+               device_remove_file(&pd->dev, &tp_ctl->attr);
+               kfree(tp_ctl);
+               tp_ctl = NULL;
+       }
+}
+
+static void sony_nc_backlight_ng_read_limits(int handle,
+               struct sony_backlight_props *props)
+{
+       u64 offset;
+       int i;
+       int lvl_table_len = 0;
+       u8 min = 0xff, max = 0x00;
+       unsigned char buffer[32] = { 0 };
+
+       props->handle = handle;
+       props->offset = 0;
+       props->maxlvl = 0xff;
+
+       offset = sony_find_snc_handle(handle);
+
+       /* try to read the boundaries from the ACPI tables; if that fails,
+        * the defaults above should be reasonable
+        */
+       i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+                       32);
+       if (i < 0)
+               return;
+
+       switch (handle) {
+       case 0x012f:
+       case 0x0137:
+               lvl_table_len = 9;
+               break;
+       case 0x143:
+               lvl_table_len = 16;
+               break;
+       }
+
+       /* the buffer lists the available brightness levels; the levels occupy
+        * positions 0 to 8 in the array, while the remaining values are used
+        * by ALS control.
+        */
+       for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
+
+               dprintk("Brightness level: %d\n", buffer[i]);
+
+               if (!buffer[i])
+                       break;
+
+               if (buffer[i] > max)
+                       max = buffer[i];
+               if (buffer[i] < min)
+                       min = buffer[i];
+       }
+       props->offset = min;
+       props->maxlvl = max;
+       dprintk("Brightness levels: min=%d max=%d\n", props->offset,
+                       props->maxlvl);
+}
+
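The table scan above reduces to finding the smallest and largest non-zero entries before the first zero; offset and maxlvl then bound the range handed to the backlight core. A standalone sketch with made-up level values (the table contents are an assumption):

/* hedged sketch of the min/max scan over a hypothetical SN06 level table */
#include <stdio.h>

int main(void)
{
	/* hypothetical 9-entry brightness table, zero-terminated like the SN06 output */
	unsigned char buffer[32] = { 0x0a, 0x14, 0x1e, 0x28, 0x32, 0x3c, 0x46, 0x50, 0x5a, 0 };
	unsigned char min = 0xff, max = 0x00;
	int i;

	for (i = 0; i < 9 && buffer[i]; i++) {
		if (buffer[i] > max)
			max = buffer[i];
		if (buffer[i] < min)
			min = buffer[i];
	}
	/* offset = 0x0a, maxlvl = 0x5a, max_brightness = 0x5a - 0x0a = 80 */
	printf("offset=%#x maxlvl=%#x max_brightness=%d\n", min, max, max - min);
	return 0;
}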
+static void sony_nc_backlight_setup(void)
+{
+       acpi_handle unused;
+       int max_brightness = 0;
+       const struct backlight_ops *ops = NULL;
+       struct backlight_properties props;
+
+       if (sony_find_snc_handle(0x12f) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
+               sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+       } else if (sony_find_snc_handle(0x137) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
+               sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+       } else if (sony_find_snc_handle(0x143) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x3000;
+               sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+       } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
+                                               &unused))) {
                ops = &sony_backlight_ops;
                max_brightness = SONY_MAX_BRIGHTNESS - 1;
 
@@ -1713,32 +2640,29 @@ static int sony_nc_add(struct acpi_device *device)
                }
        }
 
+       result = sony_laptop_setup_input(device);
+       if (result) {
+               pr_err("Unable to create input devices\n");
+               goto outplatform;
+       }
+
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+               int arg = 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
                                         &handle))) {
                dprintk("Doing SNC setup\n");
+               /* retrieve the available handles */
                result = sony_nc_handles_setup(sony_pf_device);
-               if (result)
-                       goto outpresent;
-               result = sony_nc_kbd_backlight_setup(sony_pf_device);
-               if (result)
-                       goto outsnc;
-               sony_nc_function_setup(device);
-               sony_nc_rfkill_setup(device);
+               if (!result)
+                       sony_nc_function_setup(device, sony_pf_device);
        }
 
        /* setup input devices and helper fifo */
-       result = sony_laptop_setup_input(device);
-       if (result) {
-               pr_err("Unable to create input devices\n");
-               goto outkbdbacklight;
-       }
-
        if (acpi_video_backlight_support()) {
                pr_info("brightness ignored, must be controlled by ACPI video driver\n");
        } else {
@@ -1786,24 +2710,21 @@ static int sony_nc_add(struct acpi_device *device)
 
        return 0;
 
-      out_sysfs:
+out_sysfs:
        for (item = sony_nc_values; item->name; ++item) {
                device_remove_file(&sony_pf_device->dev, &item->devattr);
        }
        sony_nc_backlight_cleanup();
+       sony_nc_function_cleanup(sony_pf_device);
+       sony_nc_handles_cleanup(sony_pf_device);
 
+outplatform:
        sony_laptop_remove_input();
 
-      outkbdbacklight:
-       sony_nc_kbd_backlight_cleanup(sony_pf_device);
-
-      outsnc:
-       sony_nc_handles_cleanup(sony_pf_device);
-
-      outpresent:
+outpresent:
        sony_pf_remove();
 
-      outwalk:
+outwalk:
        sony_nc_rfkill_cleanup();
        return result;
 }
@@ -1820,11 +2741,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
                device_remove_file(&sony_pf_device->dev, &item->devattr);
        }
 
-       sony_nc_kbd_backlight_cleanup(sony_pf_device);
+       sony_nc_function_cleanup(sony_pf_device);
        sony_nc_handles_cleanup(sony_pf_device);
        sony_pf_remove();
        sony_laptop_remove_input();
-       sony_nc_rfkill_cleanup();
        dprintk(SONY_NC_DRIVER_NAME " removed.\n");
 
        return 0;
@@ -2437,7 +3357,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        mutex_lock(&spic_dev.lock);
        __sony_pic_set_wwanpower(value);
        mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3396,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        mutex_lock(&spic_dev.lock);
        __sony_pic_set_bluetoothpower(value);
        mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3437,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        if (sony_pic_set_fanspeed(value))
                return -EIO;
 
@@ -2671,7 +3597,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                        ret = -EIO;
                        break;
                }
-               if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+               if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+                                       &value)) {
                        ret = -EIO;
                        break;
                }
@@ -2688,8 +3615,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                        ret = -EFAULT;
                        break;
                }
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-                               (val8 >> 5) + 1, NULL)) {
+               value = (val8 >> 5) + 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+                                       NULL)) {
                        ret = -EIO;
                        break;
                }
index d68c0002f4a29c451116422b4693ab2e330f7160..8b5610d884186b63b693b6b7aee2ceb705fc47ee 100644 (file)
@@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        /* Do not issue duplicate brightness change events to
         * userspace. tpacpi_detect_brightness_capabilities() must have
         * been called before this point  */
-       if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+       if (acpi_video_backlight_support()) {
                pr_info("This ThinkPad has standard ACPI backlight "
                        "brightness control, supported by the ACPI "
                        "video driver\n");
index 57787d87d9a4780df7e6d3a5d757aa7179545445..dab10f6edcd43186e2af9a397d909a0ce607694f 100644 (file)
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
 
 /* registers */
 #define HCI_FAN                                0x0004
+#define HCI_TR_BACKLIGHT               0x0005
 #define HCI_SYSTEM_EVENT               0x0016
 #define HCI_VIDEO_OUT                  0x001c
 #define HCI_HOTKEY_EVENT               0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
        unsigned int system_event_supported:1;
        unsigned int ntfy_supported:1;
        unsigned int info_supported:1;
+       unsigned int tr_backlight_supported:1;
 
        struct mutex mutex;
 };
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
        .poll = bt_rfkill_poll,
 };
 
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+       u32 hci_result;
+       u32 status;
+
+       hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+       *enabled = !status;
+       return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+       u32 hci_result;
+       u32 value = !enable;
+
+       hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
+       return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
 static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
 
-static int get_lcd(struct backlight_device *bd)
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
        u32 hci_result;
        u32 value;
+       int brightness = 0;
+
+       if (dev->tr_backlight_supported) {
+               bool enabled;
+               int ret = get_tr_backlight_status(dev, &enabled);
+               if (ret)
+                       return ret;
+               if (enabled)
+                       return 0;
+               brightness++;
+       }
 
        hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
        if (hci_result == HCI_SUCCESS)
-               return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
        return -EIO;
 }
 
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+       return __get_lcd_brightness(dev);
+}
+
 static int lcd_proc_show(struct seq_file *m, void *v)
 {
        struct toshiba_acpi_dev *dev = m->private;
        int value;
+       int levels;
 
        if (!dev->backlight_dev)
                return -ENODEV;
 
-       value = get_lcd(dev->backlight_dev);
+       levels = dev->backlight_dev->props.max_brightness + 1;
+       value = get_lcd_brightness(dev->backlight_dev);
        if (value >= 0) {
                seq_printf(m, "brightness:              %d\n", value);
-               seq_printf(m, "brightness_levels:       %d\n",
-                            HCI_LCD_BRIGHTNESS_LEVELS);
+               seq_printf(m, "brightness_levels:       %d\n", levels);
                return 0;
        }
 
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
        return single_open(file, lcd_proc_show, PDE(inode)->data);
 }
 
-static int set_lcd(struct toshiba_acpi_dev *dev, int value)
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
 {
        u32 hci_result;
 
+       if (dev->tr_backlight_supported) {
+               bool enable = !value;
+               int ret = set_tr_backlight_status(dev, enable);
+               if (ret)
+                       return ret;
+               if (value)
+                       value--;
+       }
+
        value = value << HCI_LCD_BRIGHTNESS_SHIFT;
        hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
        return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
 static int set_lcd_status(struct backlight_device *bd)
 {
        struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return set_lcd(dev, bd->props.brightness);
+       return set_lcd_brightness(dev, bd->props.brightness);
 }
 
 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
        size_t len;
        int value;
        int ret;
+       int levels = dev->backlight_dev->props.max_brightness + 1;
 
        len = min(count, sizeof(cmd) - 1);
        if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
        cmd[len] = '\0';
 
        if (sscanf(cmd, " brightness : %i", &value) == 1 &&
-           value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
-               ret = set_lcd(dev, value);
+           value >= 0 && value < levels) {
+               ret = set_lcd_brightness(dev, value);
                if (ret == 0)
                        ret = count;
        } else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
 }
 
 static const struct backlight_ops toshiba_backlight_data = {
-        .get_brightness = get_lcd,
-        .update_status  = set_lcd_status,
+       .options = BL_CORE_SUSPENDRESUME,
+       .get_brightness = get_lcd_brightness,
+       .update_status  = set_lcd_status,
 };
 
 static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
        return error;
 }
 
+static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+       struct backlight_properties props;
+       int brightness;
+       int ret;
+       bool enabled;
+
+       /*
+        * Some machines don't support the backlight methods at all, and
+        * others support them read-only. Either of these is pretty useless,
+        * so only register the backlight device if the backlight method
+        * supports both reads and writes.
+        */
+       brightness = __get_lcd_brightness(dev);
+       if (brightness < 0)
+               return 0;
+       ret = set_lcd_brightness(dev, brightness);
+       if (ret) {
+               pr_debug("Backlight method is read-only, disabling backlight support\n");
+               return 0;
+       }
+
+       /* Determine whether or not BIOS supports transflective backlight */
+       ret = get_tr_backlight_status(dev, &enabled);
+       dev->tr_backlight_supported = !ret;
+
+       memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_PLATFORM;
+       props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+       /* add an extra level so that level 0 switches to transflective mode */
+       if (dev->tr_backlight_supported)
+               props.max_brightness++;
+
+       dev->backlight_dev = backlight_device_register("toshiba",
+                                                      &dev->acpi_dev->dev,
+                                                      dev,
+                                                      &toshiba_backlight_data,
+                                                      &props);
+       if (IS_ERR(dev->backlight_dev)) {
+               ret = PTR_ERR(dev->backlight_dev);
+               pr_err("Could not register toshiba backlight device\n");
+               dev->backlight_dev = NULL;
+               return ret;
+       }
+
+       dev->backlight_dev->props.brightness = brightness;
+       return 0;
+}
+
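With the transflective backlight present, the extra level shifts the whole scale by one: userspace level 0 turns transflective mode on, and levels 1..max map to HCI brightness 0..max-1, which is what __get_lcd_brightness() and set_lcd_brightness() implement above. A standalone sketch of that mapping (the level count is an assumption, not taken from the driver):

/* hedged sketch of the user-visible brightness scale with transflective support */
#include <stdio.h>

#define LEVELS 8	/* assumed number of plain HCI brightness levels */

int main(void)
{
	int max_brightness = (LEVELS - 1) + 1;	/* one extra level for transflective */
	int value;

	for (value = 0; value <= max_brightness; value++) {
		if (value == 0)
			printf("level 0 -> transflective mode on\n");
		else
			printf("level %d -> transflective off, HCI brightness %d\n",
			       value, value - 1);
	}
	return 0;
}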
 static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
        u32 dummy;
        bool bt_present;
        int ret = 0;
-       struct backlight_properties props;
 
        if (toshiba_acpi)
                return -EBUSY;
@@ -1104,22 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        mutex_init(&dev->mutex);
 
-       memset(&props, 0, sizeof(props));
-       props.type = BACKLIGHT_PLATFORM;
-       props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
-       dev->backlight_dev = backlight_device_register("toshiba",
-                                                      &acpi_dev->dev,
-                                                      dev,
-                                                      &toshiba_backlight_data,
-                                                      &props);
-       if (IS_ERR(dev->backlight_dev)) {
-               ret = PTR_ERR(dev->backlight_dev);
-
-               pr_err("Could not register toshiba backlight device\n");
-               dev->backlight_dev = NULL;
+       ret = toshiba_acpi_setup_backlight(dev);
+       if (ret)
                goto error;
-       }
-       dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
 
        /* Register rfkill switch for Bluetooth */
        if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
index 41781ed8301c737018494bb30a19677640cc9018..b57ad8641480424b660f4d447acebdcf9ec9a2b4 100644 (file)
 
 #include <asm/olpc.h>
 
+static bool card_blocked;
+
 static int rfkill_set_block(void *data, bool blocked)
 {
        unsigned char cmd;
+       int r;
+
+       if (blocked == card_blocked)
+               return 0;
+
        if (blocked)
                cmd = EC_WLAN_ENTER_RESET;
        else
                cmd = EC_WLAN_LEAVE_RESET;
 
-       return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+       r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+       if (r == 0)
+               card_blocked = blocked;
+
+       return r;
 }
 
 static const struct rfkill_ops rfkill_ops = {
index 99dc29f2f2f2ba84b16430a51548817b094b9c3a..e3a3b4956f08408741fe5856a562b8defe7294a1 100644 (file)
@@ -1,5 +1,5 @@
 menuconfig POWER_SUPPLY
-       tristate "Power supply class support"
+       bool "Power supply class support"
        help
          Say Y here to enable power supply class support. This allows
          power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
          Say Y here to enable support for batteries with ds2780 chip.
 
 config BATTERY_DS2781
-       tristate "2781 battery driver"
+       tristate "DS2781 battery driver"
        depends on HAS_IOMEM
        select W1
        select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
          to operate with a single lithium cell
 
 config BATTERY_MAX17042
-       tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+       tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
        depends on I2C
        help
          MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
          in handheld and portable equipment. The MAX17042 is configured
          to operate with a single lithium cell. MAX8997 and MAX8966 are
          multi-function devices that include fuel gauages that are compatible
-         with MAX17042.
+         with MAX17042. This driver also supports the max17047/50 chips, which
+         are improved versions of the max17042.
 
 config BATTERY_Z2
        tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
 config CHARGER_SMB347
        tristate "Summit Microelectronics SMB347 Battery Charger"
        depends on I2C
+       select REGMAP_I2C
        help
          Say Y to include support for Summit Microelectronics SMB347
          Battery Charger.
index d8bb99394ac01c1e6ebb289698dea0f8d3426cd8..bba3ccac72fe731a6807e211af9171a3204ce8c8 100644 (file)
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
 {
        int irq, i, ret = 0;
        u8 val;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_btemp *di;
+
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
 
-       struct ab8500_btemp *di =
-               kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
        di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
        /* get btemp specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->btemp;
        if (!di->pdata) {
                dev_err(di->dev, "no btemp platform data supplied\n");
index e2b4accbec8815782ad1bf659b90e40196d31b63..d2303d0b7c755669f7ab48b968e062474241997b 100644 (file)
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
 static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 {
        int irq, i, charger_status, ret = 0;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_charger *di;
 
-       struct ab8500_charger *di =
-               kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
        spin_lock_init(&di->usb_state.usb_lock);
 
        /* get charger specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->charger;
-
        if (!di->pdata) {
                dev_err(di->dev, "no charger platform data supplied\n");
                ret = -EINVAL;
index c22f2f05657e28d249d619d9a01aaab8a3095bb6..bf022255994c86b3d3486e27e1edcc905c7246f4 100644 (file)
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
 {
        int i, irq;
        int ret = 0;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_fg *di;
+
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
 
-       struct ab8500_fg *di =
-               kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
        di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
        /* get fg specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->fg;
        if (!di->pdata) {
                dev_err(di->dev, "no fg platform data supplied\n");
index 9eca9f1ff0eae2e5b381e503f035ceb446340e66..86935ec1895431aac77c47de1860711ae26e21eb 100644 (file)
 #include <linux/power/charger-manager.h>
 #include <linux/regulator/consumer.h>
 
+static const char * const default_event_names[] = {
+       [CM_EVENT_UNKNOWN] = "Unknown",
+       [CM_EVENT_BATT_FULL] = "Battery Full",
+       [CM_EVENT_BATT_IN] = "Battery Inserted",
+       [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+       [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+       [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+       [CM_EVENT_OTHERS] = "Other battery events"
+};
+
 /*
  * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
  * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
 static bool cm_rtc_set;
 static unsigned long cm_suspend_duration_ms;
 
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
 /* Global charger-manager description */
 static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
 
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
        int i, ret;
 
        switch (cm->desc->battery_present) {
+       case CM_BATTERY_PRESENT:
+               present = true;
+               break;
+       case CM_NO_BATTERY:
+               break;
        case CM_FUEL_GAUGE:
                ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
                                POWER_SUPPLY_PROP_PRESENT, &val);
@@ -278,6 +299,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
        return err;
 }
 
+/**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+       int err;
+
+       if (cm->emergency_stop)
+               return -EAGAIN;
+
+       err = try_charger_enable(cm, false);
+       if (err)
+               return err;
+
+       return try_charger_enable(cm, true);
+}
+
 /**
  * uevent_notify - Let users know something has changed.
  * @cm: the Charger Manager representing the battery.
@@ -333,6 +374,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
        dev_info(cm->dev, event);
 }
 
+/**
+ * fullbatt_vchk - Check the voltage drop some time after the "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct charger_manager *cm = container_of(dwork,
+                       struct charger_manager, fullbatt_vchk_work);
+       struct charger_desc *desc = cm->desc;
+       int batt_uV, err, diff;
+
+       /* remove the appointment for fullbatt_vchk */
+       cm->fullbatt_vchk_jiffies_at = 0;
+
+       if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+               return;
+
+       err = get_batt_uV(cm, &batt_uV);
+       if (err) {
+               dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+               return;
+       }
+
+       diff = cm->fullbatt_vchk_uV;
+       diff -= batt_uV;
+
+       dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+       if (diff > desc->fullbatt_vchkdrop_uV) {
+               try_charger_restart(cm);
+               uevent_notify(cm, "Recharge");
+       }
+}
+
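Putting hypothetical numbers on the check above: if the voltage latched at the FULL event was 4.20 V and the battery reads 4.13 V once fullbatt_vchkdrop_ms has elapsed, the 70000 uV drop exceeds a 50000 uV fullbatt_vchkdrop_uV setting, so charging is restarted and a "Recharge" uevent is sent. A standalone sketch of the arithmetic (all values are assumptions):

/* hedged sketch of the full-battery voltage drop check with made-up values */
#include <stdio.h>

int main(void)
{
	int fullbatt_vchk_uV = 4200000;		/* voltage latched at the FULL event */
	int batt_uV = 4130000;			/* voltage read fullbatt_vchkdrop_ms later */
	int fullbatt_vchkdrop_uV = 50000;	/* configured allowed drop */
	int diff = fullbatt_vchk_uV - batt_uV;

	printf("VBATT dropped %duV after full-batt.\n", diff);
	if (diff > fullbatt_vchkdrop_uV)
		printf("-> restart charging and notify \"Recharge\"\n");
	return 0;
}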
 /**
  * _cm_monitor - Monitor the temperature and return true for exceptions.
  * @cm: the Charger Manager representing the battery.
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
        return stop;
 }
 
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+       unsigned long min = ULONG_MAX;
+       struct charger_manager *cm;
+       bool keep_polling = false;
+       unsigned long _next_polling;
+
+       mutex_lock(&cm_list_mtx);
+
+       list_for_each_entry(cm, &cm_list, entry) {
+               if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+                       keep_polling = true;
+
+                       if (min > cm->desc->polling_interval_ms)
+                               min = cm->desc->polling_interval_ms;
+               }
+       }
+
+       polling_jiffy = msecs_to_jiffies(min);
+       if (polling_jiffy <= CM_JIFFIES_SMALL)
+               polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+       if (!keep_polling)
+               polling_jiffy = ULONG_MAX;
+       if (polling_jiffy == ULONG_MAX)
+               goto out;
+
+       WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+                           ". Try again later. %s\n", __func__);
+
+       _next_polling = jiffies + polling_jiffy;
+
+       if (!delayed_work_pending(&cm_monitor_work) ||
+           (delayed_work_pending(&cm_monitor_work) &&
+            time_after(next_polling, _next_polling))) {
+               cancel_delayed_work_sync(&cm_monitor_work);
+               next_polling = jiffies + polling_jiffy;
+               queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+       }
+
+out:
+       mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
+
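_setup_polling() above simply takes the smallest polling_interval_ms among the managers that still need polling and clamps it above CM_JIFFIES_SMALL; with two managers configured for 30000 ms and 10000 ms, the monitor work runs every 10000 ms. A standalone sketch of that selection (the interval values are assumptions):

/* hedged sketch of the minimum-interval selection done by _setup_polling() */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long intervals_ms[] = { 30000, 10000 };	/* hypothetical managers */
	unsigned long min = ULONG_MAX;
	unsigned int i;

	for (i = 0; i < sizeof(intervals_ms) / sizeof(intervals_ms[0]); i++)
		if (intervals_ms[i] && intervals_ms[i] < min)
			min = intervals_ms[i];

	if (min == ULONG_MAX)
		printf("no polling required\n");
	else
		printf("poll every %lu ms\n", min);
	return 0;
}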
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+       cm_monitor();
+       schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+       struct charger_desc *desc = cm->desc;
+
+       if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+               goto out;
+
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (delayed_work_pending(&cm->fullbatt_vchk_work))
+               cancel_delayed_work(&cm->fullbatt_vchk_work);
+       queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                          msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+       cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+                                      desc->fullbatt_vchkdrop_ms);
+
+       if (cm->fullbatt_vchk_jiffies_at == 0)
+               cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+       dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+       uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (!is_batt_present(cm)) {
+               dev_emerg(cm->dev, "Battery Pulled Out!\n");
+               uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+       } else {
+               uevent_notify(cm, "Battery Reinserted?");
+       }
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: the type of charger event.
+ */
+static void misc_event_handler(struct charger_manager *cm,
+                       enum cm_event_types type)
+{
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (!delayed_work_pending(&cm_monitor_work) &&
+           is_polling_required(cm) && cm->desc->polling_interval_ms)
+               schedule_work(&setup_polling);
+       uevent_notify(cm, default_event_names[type]);
+}
+
 static int charger_get_property(struct power_supply *psy,
                enum power_supply_property psp,
                union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
        mutex_lock(&cm_list_mtx);
 
        list_for_each_entry(cm, &cm_list, entry) {
+               unsigned int fbchk_ms = 0;
+
+               /* fullbatt_vchk is required. setup timer for that */
+               if (cm->fullbatt_vchk_jiffies_at) {
+                       fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+                                                   - jiffies);
+                       if (time_is_before_eq_jiffies(
+                               cm->fullbatt_vchk_jiffies_at) ||
+                               msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+                               fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+                               fbchk_ms = 0;
+                       }
+               }
+               CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
                /* Skip if polling is not required for this CM */
                if (!is_polling_required(cm) && !cm->emergency_stop)
                        continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
        return false;
 }
 
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+       unsigned long jiffy_now = jiffies;
+
+       if (!cm->fullbatt_vchk_jiffies_at)
+               return;
+
+       if (g_desc && g_desc->assume_timer_stops_in_suspend)
+               jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+       /* Execute now if it would be due only a little later anyway */
+       jiffy_now += CM_JIFFIES_SMALL;
+
+       if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+               fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
 /**
  * cm_suspend_again - Determine whether suspend again or not
  *
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
        ret = true;
        mutex_lock(&cm_list_mtx);
        list_for_each_entry(cm, &cm_list, entry) {
+               _cm_fbchk_in_suspend(cm);
+
                if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
                    cm->status_save_batt != is_batt_present(cm)) {
                        ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
        memcpy(cm->desc, desc, sizeof(struct charger_desc));
        cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
 
+       /*
+        * The following two do not need to be errors.
+        * Users may intentionally ignore those two features.
+        */
+       if (desc->fullbatt_uV == 0) {
+               dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+                                       " as it is not supplied.");
+       }
+       if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+               dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+                               "checking mechanism as it is not supplied.");
+               desc->fullbatt_vchkdrop_ms = 0;
+               desc->fullbatt_vchkdrop_uV = 0;
+       }
+
        if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
                ret = -EINVAL;
                dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
                cm->charger_psy.num_properties++;
        }
 
+       INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
        ret = power_supply_register(NULL, &cm->charger_psy);
        if (ret) {
                dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
        list_add(&cm->entry, &cm_list);
        mutex_unlock(&cm_list_mtx);
 
+       /*
+        * Charger-manager is capable of waking up the system from sleep
+        * when an event is reported through cm_notify_event()
+        */
+       device_init_wakeup(&pdev->dev, true);
+       device_set_wakeup_capable(&pdev->dev, false);
+
+       schedule_work(&setup_polling);
+
        return 0;
 
 err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
        list_del(&cm->entry);
        mutex_unlock(&cm_list_mtx);
 
+       if (work_pending(&setup_polling))
+               cancel_work_sync(&setup_polling);
+       if (delayed_work_pending(&cm_monitor_work))
+               cancel_delayed_work_sync(&cm_monitor_work);
+
        regulator_bulk_free(desc->num_charger_regulators,
                            desc->charger_regulators);
        power_supply_unregister(&cm->charger_psy);
+
+       try_charger_enable(cm, false);
+
        kfree(cm->charger_psy.properties);
        kfree(cm->charger_stat);
        kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
 };
 MODULE_DEVICE_TABLE(platform, charger_manager_id);
 
+static int cm_suspend_noirq(struct device *dev)
+{
+       int ret = 0;
+
+       if (device_may_wakeup(dev)) {
+               device_set_wakeup_capable(dev, false);
+               ret = -EAGAIN;
+       }
+
+       return ret;
+}
+
 static int cm_suspend_prepare(struct device *dev)
 {
        struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
                cm_suspended = true;
        }
 
+       if (delayed_work_pending(&cm->fullbatt_vchk_work))
+               cancel_delayed_work(&cm->fullbatt_vchk_work);
        cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
        cm->status_save_batt = is_batt_present(cm);
 
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
                cm_rtc_set = false;
        }
 
+       /* Re-enqueue delayed work (fullbatt_vchk_work) */
+       if (cm->fullbatt_vchk_jiffies_at) {
+               unsigned long delay = 0;
+               unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+               if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+                       delay = (unsigned long)((long)now
+                               - (long)(cm->fullbatt_vchk_jiffies_at));
+                       delay = jiffies_to_msecs(delay);
+               } else {
+                       delay = 0;
+               }
+
+               /*
+                * Account for cm_suspend_duration_ms if
+                * assume_timer_stops_in_suspend is active
+                */
+               if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+                       if (delay > cm_suspend_duration_ms)
+                               delay -= cm_suspend_duration_ms;
+                       else
+                               delay = 0;
+               }
+
+               queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                                  msecs_to_jiffies(delay));
+       }
+       device_set_wakeup_capable(cm->dev, false);
        uevent_notify(cm, NULL);
 }
 
 static const struct dev_pm_ops charger_manager_pm = {
        .prepare        = cm_suspend_prepare,
+       .suspend_noirq  = cm_suspend_noirq,
        .complete       = cm_suspend_complete,
 };
 
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
 
 static int __init charger_manager_init(void)
 {
+       cm_wq = create_freezable_workqueue("charger_manager");
+       INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
        return platform_driver_register(&charger_manager_driver);
 }
 late_initcall(charger_manager_init);
 
 static void __exit charger_manager_cleanup(void)
 {
+       destroy_workqueue(cm_wq);
+       cm_wq = NULL;
+
        platform_driver_unregister(&charger_manager_driver);
 }
 module_exit(charger_manager_cleanup);
 
+/**
+ * find_power_supply - check whether a power_supply belongs to this Charger Manager
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+                       struct power_supply *psy)
+{
+       int i;
+       bool found = false;
+
+       for (i = 0; cm->charger_stat[i]; i++) {
+               if (psy == cm->charger_stat[i]) {
+                       found = true;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+/**
+ * cm_notify_event - let a charger driver notify Charger Manager of a charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to the uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+                    char *msg)
+{
+       struct charger_manager *cm;
+       bool found_power_supply = false;
+
+       if (psy == NULL)
+               return;
+
+       mutex_lock(&cm_list_mtx);
+       list_for_each_entry(cm, &cm_list, entry) {
+               found_power_supply = find_power_supply(cm, psy);
+               if (found_power_supply)
+                       break;
+       }
+       mutex_unlock(&cm_list_mtx);
+
+       if (!found_power_supply)
+               return;
+
+       switch (type) {
+       case CM_EVENT_BATT_FULL:
+               fullbatt_handler(cm);
+               break;
+       case CM_EVENT_BATT_OUT:
+               battout_handler(cm);
+               break;
+       case CM_EVENT_BATT_IN:
+       case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+               misc_event_handler(cm, type);
+               break;
+       case CM_EVENT_UNKNOWN:
+       case CM_EVENT_OTHERS:
+               uevent_notify(cm, msg ? msg : default_event_names[type]);
+               break;
+       default:
+               dev_err(cm->dev, "%s type not specified.\n", __func__);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
+
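Charger drivers feed events into the handlers above through the exported helper. A minimal sketch, based only on the cm_notify_event() signature introduced by this patch, of how a hypothetical charger driver (all names below are made up) might forward its end-of-charge interrupt:

/* hedged sketch, not taken from an existing driver: a charger's threaded
 * IRQ handler forwarding an end-of-charge event to charger-manager.
 */
#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

struct my_charger {			/* hypothetical driver state */
	struct power_supply psy;
	/* ... */
};

static irqreturn_t my_charger_irq(int irq, void *data)
{
	struct my_charger *chip = data;

	/* tell charger-manager the battery reports full charge */
	cm_notify_event(&chip->psy, CM_EVENT_BATT_FULL, NULL);
	return IRQ_HANDLED;
}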
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("Charger Manager");
 MODULE_LICENSE("GPL");
index ca0d653d0a7a2c3ac0d4c422f66317c3993dc49b..975684a40f1519ad33e5f630ecfa82020d71d960 100644 (file)
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK1_END -
-               DS2781_EEPROM_BLOCK1_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
        return ds2781_read_block(dev_info, buf,
                                DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
        int ret;
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK1_END -
-               DS2781_EEPROM_BLOCK1_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
        ret = ds2781_write(dev_info, buf,
                                DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
                .name = "param_eeprom",
                .mode = S_IRUGO | S_IWUSR,
        },
-       .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+       .size = DS2781_PARAM_EEPROM_SIZE,
        .read = ds2781_read_param_eeprom_bin,
        .write = ds2781_write_param_eeprom_bin,
 };
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK0_END -
-               DS2781_EEPROM_BLOCK0_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
        return ds2781_read_block(dev_info, buf,
                                DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
        int ret;
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK0_END -
-               DS2781_EEPROM_BLOCK0_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
        ret = ds2781_write(dev_info, buf,
                                DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
                .name = "user_eeprom",
                .mode = S_IRUGO | S_IWUSR,
        },
-       .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+       .size = DS2781_USER_EEPROM_SIZE,
        .read = ds2781_read_user_eeprom_bin,
        .write = ds2781_write_user_eeprom_bin,
 };
index 39eb50f35f09fd777445a53202fb30823d9f9438..e5ccd29797732d8dae6992a558eabd896fe6a9f0 100644 (file)
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
 fail2:
        power_supply_unregister(&isp->psy);
 fail1:
+       isp1704_charger_set_power(isp, 0);
        usb_put_transceiver(isp->phy);
 fail0:
        kfree(isp);
 
        dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
 
-       isp1704_charger_set_power(isp, 0);
        return ret;
 }
 
index 04620c2cb388f3f5f2411f6a2b8191c5b36b459f..140788b309f84fe26056e77636eff26b954d8cb8 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/mod_devicetable.h>
 #include <linux/power_supply.h>
 #include <linux/power/max17042_battery.h>
 #define dP_ACC_100     0x1900
 #define dP_ACC_200     0x3200
 
+#define MAX17042_IC_VERSION    0x0092
+#define MAX17047_IC_VERSION    0x00AC  /* same for max17050 */
+
 struct max17042_chip {
        struct i2c_client *client;
        struct power_supply battery;
+       enum max170xx_chip_type chip_type;
        struct max17042_platform_data *pdata;
        struct work_struct work;
        int    init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_VOLTAGE_AVG,
+       POWER_SUPPLY_PROP_VOLTAGE_OCV,
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
                val->intval *= 20000; /* Units of LSB = 20mV */
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-               ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+               if (chip->chip_type == MAX17042)
+                       ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+               else
+                       ret = max17042_read_reg(chip->client, MAX17047_V_empty);
                if (ret < 0)
                        return ret;
 
@@ -169,6 +178,13 @@ static int max17042_get_property(struct power_supply *psy,
                if (ret < 0)
                        return ret;
 
+               val->intval = ret * 625 / 8;
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+               ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+               if (ret < 0)
+                       return ret;
+
                val->intval = ret * 625 / 8;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
 static int max17042_init_model(struct max17042_chip *chip)
 {
        int ret;
-       int table_size =
-               sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+       int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
        u16 *temp_data;
 
-       temp_data = kzalloc(table_size, GFP_KERNEL);
+       temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
        if (!temp_data)
                return -ENOMEM;
 
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
 static int max17042_verify_model_lock(struct max17042_chip *chip)
 {
        int i;
-       int table_size =
-               sizeof(chip->pdata->config_data->cell_char_tbl);
+       int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
        u16 *temp_data;
        int ret = 0;
 
-       temp_data = kzalloc(table_size, GFP_KERNEL);
+       temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
        if (!temp_data)
                return -ENOMEM;
 
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
        max17042_write_reg(chip->client, MAX17042_FilterCFG,
                        config->filter_cfg);
        max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+       if (chip->chip_type == MAX17047)
+               max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+                                               config->full_soc_thresh);
 }
 
 static void  max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void  max17042_write_custom_regs(struct max17042_chip *chip)
                                config->rcomp0);
        max17042_write_verify_reg(chip->client, MAX17042_TempCo,
                                config->tcompc0);
-       max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
-                       config->empty_tempco);
-       max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
-                               config->kempty0);
        max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
                                config->ichgt_term);
+       if (chip->chip_type == MAX17042) {
+               max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+                                       config->empty_tempco);
+               max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+                                       config->kempty0);
+       } else {
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+                                               config->qrtbl00);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+                                               config->qrtbl10);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+                                               config->qrtbl20);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+                                               config->qrtbl30);
+       }
 }
 
 static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
                        config->design_cap);
        max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
                        config->fullcapnom);
+       /* Update SOC register with new SOC */
+       max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
 }
 
 /*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
 
        max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
        max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
-       max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+       if (chip->chip_type == MAX17042)
+               max17042_override_por(client, MAX17042_SOC_empty,
+                                               config->socempty);
        max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
        max17042_override_por(client, MAX17042_dQacc, config->dqacc);
        max17042_override_por(client, MAX17042_dPacc, config->dpacc);
 
-       max17042_override_por(client, MAX17042_V_empty, config->vempty);
+       if (chip->chip_type == MAX17042)
+               max17042_override_por(client, MAX17042_V_empty, config->vempty);
+       else
+               max17042_override_por(client, MAX17047_V_empty, config->vempty);
        max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
        max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
        max17042_override_por(client, MAX17042_FCTC, config->fctc);
        max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
        max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
-       max17042_override_por(client, MAX17042_EmptyTempCo,
-                       config->empty_tempco);
-       max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+       if (chip->chip_type) {
+               max17042_override_por(client, MAX17042_EmptyTempCo,
+                                       config->empty_tempco);
+               max17042_override_por(client, MAX17042_K_empty0,
+                                       config->kempty0);
+       }
 }
 
 static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, chip);
 
-       chip->battery.name              = "max17042_battery";
+       ret = max17042_read_reg(chip->client, MAX17042_DevName);
+       if (ret == MAX17042_IC_VERSION) {
+               dev_dbg(&client->dev, "chip type max17042 detected\n");
+               chip->chip_type = MAX17042;
+       } else if (ret == MAX17047_IC_VERSION) {
+               dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+               chip->chip_type = MAX17047;
+       } else {
+               dev_err(&client->dev, "device version mismatch: %x\n", ret);
+               return -EIO;
+       }
+
+       chip->battery.name              = "max170xx_battery";
        chip->battery.type              = POWER_SUPPLY_TYPE_BATTERY;
        chip->battery.get_property      = max17042_get_property;
        chip->battery.properties        = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
                max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
        }
 
+       ret = power_supply_register(&client->dev, &chip->battery);
+       if (ret) {
+               dev_err(&client->dev, "failed: power supply register\n");
+               return ret;
+       }
+
        if (client->irq) {
                ret = request_threaded_irq(client->irq, NULL,
                                                max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
                        reg |= CONFIG_ALRT_BIT_ENBL;
                        max17042_write_reg(client, MAX17042_CONFIG, reg);
                        max17042_set_soc_threshold(chip, 1);
-               } else
+               } else {
+                       client->irq = 0;
                        dev_err(&client->dev, "%s(): cannot get IRQ\n",
                                __func__);
+               }
        }
 
        reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
        if (reg & STATUS_POR_BIT) {
                INIT_WORK(&chip->work, max17042_init_worker);
                schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
                chip->init_complete = 1;
        }
 
-       ret = power_supply_register(&client->dev, &chip->battery);
-       if (ret)
-               dev_err(&client->dev, "failed: power supply register\n");
-       return ret;
+       return 0;
 }
 
 static int __devexit max17042_remove(struct i2c_client *client)
 {
        struct max17042_chip *chip = i2c_get_clientdata(client);
 
+       if (client->irq)
+               free_irq(client->irq, chip);
        power_supply_unregister(&chip->battery);
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+       struct max17042_chip *chip = dev_get_drvdata(dev);
+
+       /*
+        * disable the irq and enable irq_wake
+        * capability to the interrupt line.
+        */
+       if (chip->client->irq) {
+               disable_irq(chip->client->irq);
+               enable_irq_wake(chip->client->irq);
+       }
+
+       return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+       struct max17042_chip *chip = dev_get_drvdata(dev);
+
+       if (chip->client->irq) {
+               disable_irq_wake(chip->client->irq);
+               enable_irq(chip->client->irq);
+               /* re-program the SOC thresholds to 1% change */
+               max17042_set_soc_threshold(chip, 1);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+       .suspend        = max17042_suspend,
+       .resume         = max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id max17042_dt_match[] = {
        { .compatible = "maxim,max17042" },
+       { .compatible = "maxim,max17047" },
+       { .compatible = "maxim,max17050" },
        { },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
 
 static const struct i2c_device_id max17042_id[] = {
        { "max17042", 0 },
+       { "max17047", 1 },
+       { "max17050", 2 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
        .driver = {
                .name   = "max17042",
                .of_match_table = of_match_ptr(max17042_dt_match),
+               .pm     = MAX17042_PM_OPS,
        },
        .probe          = max17042_probe,
        .remove         = __devexit_p(max17042_remove),
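The hunk near the top of this file's diff calls max17042_override_por() once per platform-supplied register value, but the helper's definition falls outside the diff context. A minimal sketch of a plausible form, inferred only from the call sites (the assumption is that a zero value means the field was not provided and the chip's power-on-reset default should be kept):

	/* Illustrative sketch, not copied from the driver: write a platform-data
	 * value over the POR default only when one was actually supplied. */
	static inline void max17042_override_por(struct i2c_client *client,
						 u8 reg, u16 value)
	{
		if (value)
			max17042_write_reg(client, reg, value);
	}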
index 4368e7d61316bb37c9a3565ca1c0400964aa6781..4150747f9186f8dfaff9faf81bf03a7ade8cb9fa 100644 (file)
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
        POWER_SUPPLY_ATTR(voltage_min_design),
        POWER_SUPPLY_ATTR(voltage_now),
        POWER_SUPPLY_ATTR(voltage_avg),
+       POWER_SUPPLY_ATTR(voltage_ocv),
        POWER_SUPPLY_ATTR(current_max),
        POWER_SUPPLY_ATTR(current_now),
        POWER_SUPPLY_ATTR(current_avg),
index 06b659d9179009e032bd7aaf2953aa8bf82c84fb..a5b6849d4123b51b60d2431bbcd72cc9f13c86f4 100644 (file)
@@ -89,7 +89,7 @@ static const struct chip_data {
        [REG_CURRENT] =
                SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
        [REG_CAPACITY] =
-               SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+               SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
        [REG_REMAINING_CAPACITY] =
                SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
        [REG_REMAINING_CAPACITY_CHARGE] =
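The one-byte change above switches the CAPACITY property from SBS command 0x0E to 0x0D. In the Smart Battery Data Specification, 0x0D is RelativeStateOfCharge() (remaining capacity as a percentage of FullChargeCapacity()) while 0x0E is AbsoluteStateOfCharge() (a percentage of DesignCapacity()), so 0x0D matches what POWER_SUPPLY_PROP_CAPACITY is expected to report. For reference only (these names come from the SBS specification, not identifiers defined in sbs-battery.c):

	#define SBS_RELATIVE_STATE_OF_CHARGE	0x0D	/* % of FullChargeCapacity() */
	#define SBS_ABSOLUTE_STATE_OF_CHARGE	0x0E	/* % of DesignCapacity() */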
index ce1694d1a36584b3e910adde4dd0ff5800fc4c72..f8eedd8a676fc68ad21f45b8bfa4ddec55add723 100644 (file)
@@ -11,7 +11,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/debugfs.h>
+#include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -21,7 +21,7 @@
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
 #include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
 
 /*
  * Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
 #define CFG_CURRENT_LIMIT_DC_SHIFT             4
 #define CFG_CURRENT_LIMIT_USB_MASK             0x0f
 #define CFG_FLOAT_VOLTAGE                      0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK           0x3f
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK       0xc0
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT      6
 #define CFG_STAT                               0x05
 #define STAT_C_CHARGER_ERROR                   BIT(6)
 #define STAT_E                                 0x3f
 
+#define SMB347_MAX_REGISTER                    0x3f
+
 /**
  * struct smb347_charger - smb347 charger instance
  * @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
  * @battery: power_supply instance for battery
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
  * @charging_enabled: is charging enabled
- * @dentry: for debugfs
  * @pdata: pointer to platform data
  */
 struct smb347_charger {
        struct mutex            lock;
-       struct i2c_client       *client;
+       struct device           *dev;
+       struct regmap           *regmap;
        struct power_supply     mains;
        struct power_supply     usb;
        struct power_supply     battery;
        bool                    mains_online;
        bool                    usb_online;
        bool                    charging_enabled;
-       struct dentry           *dentry;
        const struct smb347_charger_platform_data *pdata;
 };
 
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
        1200000,
 };
 
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
-       if (val >= size)
-               return -EINVAL;
-       return tbl[val];
-}
-
 /* Convert current to register value using lookup table */
 static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
 {
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
        return i > 0 ? i - 1 : -EINVAL;
 }
 
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
-       int ret;
-
-       ret = i2c_smbus_read_byte_data(smb->client, reg);
-       if (ret < 0)
-               dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
-                        reg, ret);
-       return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
-       int ret;
-
-       ret = i2c_smbus_write_byte_data(smb->client, reg, val);
-       if (ret < 0)
-               dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
-                        reg, ret);
-       return ret;
-}
-
 /**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
  * @smb: pointer to smb347 charger instance
  *
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. If there is a change compared to the
+ * previous state it returns %1, otherwise %0, or a negative errno on error.
  */
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
 {
        bool usb = false;
        bool dc = false;
+       unsigned int val;
        int ret;
 
-       ret = smb347_read(smb, IRQSTAT_E);
+       ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
        if (ret < 0)
                return ret;
 
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
         * platform data _and_ whether corresponding undervoltage is set.
         */
        if (smb->pdata->use_mains)
-               dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+               dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
        if (smb->pdata->use_usb)
-               usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+               usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
 
        mutex_lock(&smb->lock);
        ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
 }
 
 /*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
  * @smb: pointer to smb347 charger instance
  *
  * Returns %true if input power source is connected. Note that this is
  * dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
  */
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
 {
        bool ret;
 
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
  */
 static int smb347_charging_status(struct smb347_charger *smb)
 {
+       unsigned int val;
        int ret;
 
-       if (!smb347_is_online(smb))
+       if (!smb347_is_ps_online(smb))
                return 0;
 
-       ret = smb347_read(smb, STAT_C);
+       ret = regmap_read(smb->regmap, STAT_C, &val);
        if (ret < 0)
                return 0;
 
-       return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+       return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
 }
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
        int ret = 0;
 
        if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
-               dev_dbg(&smb->client->dev,
-                       "charging enable/disable in SW disabled\n");
+               dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
                return 0;
        }
 
        mutex_lock(&smb->lock);
        if (smb->charging_enabled != enable) {
-               ret = smb347_read(smb, CMD_A);
-               if (ret < 0)
-                       goto out;
-
-               smb->charging_enabled = enable;
-
-               if (enable)
-                       ret |= CMD_A_CHG_ENABLED;
-               else
-                       ret &= ~CMD_A_CHG_ENABLED;
-
-               ret = smb347_write(smb, CMD_A, ret);
+               ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+                                        enable ? CMD_A_CHG_ENABLED : 0);
+               if (!ret)
+                       smb->charging_enabled = enable;
        }
-out:
        mutex_unlock(&smb->lock);
        return ret;
 }
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
        return smb347_charging_set(smb, false);
 }
 
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
 {
        int ret;
 
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
         * disable or enable the charging. We do it manually because it
         * depends on how the platform has configured the valid inputs.
         */
-       if (smb347_is_online(smb)) {
+       if (smb347_is_ps_online(smb)) {
                ret = smb347_charging_enable(smb);
                if (ret < 0)
-                       dev_err(&smb->client->dev,
-                               "failed to enable charging\n");
+                       dev_err(smb->dev, "failed to enable charging\n");
        } else {
                ret = smb347_charging_disable(smb);
                if (ret < 0)
-                       dev_err(&smb->client->dev,
-                               "failed to disable charging\n");
+                       dev_err(smb->dev, "failed to disable charging\n");
        }
 
        return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
 
 static int smb347_set_charge_current(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_CHARGE_CURRENT);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->max_charge_current) {
-               val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+               ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
                                    smb->pdata->max_charge_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
-               ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_FCC_MASK,
+                                        ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->pre_charge_current) {
-               val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+               ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
                                    smb->pdata->pre_charge_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
-               ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_PCC_MASK,
+                                        ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->termination_current) {
-               val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+               ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
                                    smb->pdata->termination_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_TC_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+       return 0;
 }
 
 static int smb347_set_current_limits(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_CURRENT_LIMIT);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->mains_current_limit) {
-               val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+               ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
                                    smb->pdata->mains_current_limit);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
-               ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+                                        CFG_CURRENT_LIMIT_DC_MASK,
+                                        ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->usb_hc_current_limit) {
-               val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+               ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
                                    smb->pdata->usb_hc_current_limit);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+                                        CFG_CURRENT_LIMIT_USB_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+       return 0;
 }
 
 static int smb347_set_voltage_limits(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->pre_to_fast_voltage) {
-               val = smb->pdata->pre_to_fast_voltage;
+               ret = smb->pdata->pre_to_fast_voltage;
 
                /* uV */
-               val = clamp_val(val, 2400000, 3000000) - 2400000;
-               val /= 200000;
+               ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+               ret /= 200000;
 
-               ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
-               ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+                               CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+                               ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->max_charge_voltage) {
-               val = smb->pdata->max_charge_voltage;
+               ret = smb->pdata->max_charge_voltage;
 
                /* uV */
-               val = clamp_val(val, 3500000, 4500000) - 3500000;
-               val /= 20000;
+               ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+               ret /= 20000;
 
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+                                        CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+       return 0;
 }
 
 static int smb347_set_temp_limits(struct smb347_charger *smb)
 {
        bool enable_therm_monitor = false;
-       int ret, val;
+       int ret = 0;
+       int val;
 
        if (smb->pdata->chip_temp_threshold) {
                val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 100, 130) - 100;
                val /= 10;
 
-               ret = smb347_read(smb, CFG_OTG);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
-               ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
-               ret = smb347_write(smb, CFG_OTG, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_OTG,
+                                        CFG_OTG_TEMP_THRESHOLD_MASK,
+                                        val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
                if (ret < 0)
                        return ret;
        }
 
-       ret = smb347_read(smb, CFG_TEMP_LIMIT);
-       if (ret < 0)
-               return ret;
-
        if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
                val = smb->pdata->soft_cold_temp_limit;
 
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                /* this goes from higher to lower so invert the value */
                val = ~val & 0x3;
 
-               ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
-               ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+                                        val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 40, 55) - 40;
                val /= 5;
 
-               ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
-               ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+                                        val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                /* this goes from higher to lower so invert the value */
                val = ~val & 0x3;
 
-               ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
-               ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_HARD_COLD_MASK,
+                                        val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 50, 65) - 50;
                val /= 5;
 
-               ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
-               ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_HARD_HOT_MASK,
+                                        val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
 
-       ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
-       if (ret < 0)
-               return ret;
-
        /*
         * If any of the temperature limits are set, we also enable the
         * thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
         * depending on the configuration.
         */
        if (enable_therm_monitor) {
-               ret = smb347_read(smb, CFG_THERM);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_THERM_MONITOR_DISABLED;
-
-               ret = smb347_write(smb, CFG_THERM, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                        CFG_THERM_MONITOR_DISABLED, 0);
                if (ret < 0)
                        return ret;
        }
 
        if (smb->pdata->suspend_on_hard_temp_limit) {
-               ret = smb347_read(smb, CFG_SYSOK);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
-               ret = smb347_write(smb, CFG_SYSOK, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
                if (ret < 0)
                        return ret;
        }
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
            SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
                val = smb->pdata->soft_temp_limit_compensation & 0x3;
 
-               ret = smb347_read(smb, CFG_THERM);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+                                val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
 
-               ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
-               ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
-               ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
-               ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
-               ret = smb347_write(smb, CFG_THERM, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+                                val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
        }
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                if (val < 0)
                        return val;
 
-               ret = smb347_read(smb, CFG_OTG);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
-               ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
-               ret = smb347_write(smb, CFG_OTG, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_OTG,
+                               CFG_OTG_CC_COMPENSATION_MASK,
+                               (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
        }
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
  */
 static int smb347_set_writable(struct smb347_charger *smb, bool writable)
 {
-       int ret;
-
-       ret = smb347_read(smb, CMD_A);
-       if (ret < 0)
-               return ret;
-
-       if (writable)
-               ret |= CMD_A_ALLOW_WRITE;
-       else
-               ret &= ~CMD_A_ALLOW_WRITE;
-
-       return smb347_write(smb, CMD_A, ret);
+       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+                                 writable ? CMD_A_ALLOW_WRITE : 0);
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
 {
+       unsigned int val;
        int ret;
 
        ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
 
        /* If USB charging is disabled we put the USB in suspend mode */
        if (!smb->pdata->use_usb) {
-               ret = smb347_read(smb, CMD_A);
-               if (ret < 0)
-                       goto fail;
-
-               ret |= CMD_A_SUSPEND_ENABLED;
-
-               ret = smb347_write(smb, CMD_A, ret);
+               ret = regmap_update_bits(smb->regmap, CMD_A,
+                                        CMD_A_SUSPEND_ENABLED,
+                                        CMD_A_SUSPEND_ENABLED);
                if (ret < 0)
                        goto fail;
        }
 
-       ret = smb347_read(smb, CFG_OTHER);
-       if (ret < 0)
-               goto fail;
-
        /*
         * If configured by platform data, we enable hardware Auto-OTG
         * support for driving VBUS. Otherwise we disable it.
         */
-       ret &= ~CFG_OTHER_RID_MASK;
-       if (smb->pdata->use_usb_otg)
-               ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
-       ret = smb347_write(smb, CFG_OTHER, ret);
-       if (ret < 0)
-               goto fail;
-
-       ret = smb347_read(smb, CFG_PIN);
+       ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+               smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
        if (ret < 0)
                goto fail;
 
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
         * command register unless pin control is specified in the platform
         * data.
         */
-       ret &= ~CFG_PIN_EN_CTRL_MASK;
-
        switch (smb->pdata->enable_control) {
-       case SMB347_CHG_ENABLE_SW:
-               /* Do nothing, 0 means i2c control */
-               break;
        case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
-               ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+               val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
                break;
        case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
-               ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+               val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+               break;
+       default:
+               val = 0;
                break;
        }
 
-       /* Disable Automatic Power Source Detection (APSD) interrupt. */
-       ret &= ~CFG_PIN_EN_APSD_IRQ;
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+                                val);
+       if (ret < 0)
+               goto fail;
 
-       ret = smb347_write(smb, CFG_PIN, ret);
+       /* Disable Automatic Power Source Detection (APSD) interrupt. */
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
        if (ret < 0)
                goto fail;
 
-       ret = smb347_update_status(smb);
+       ret = smb347_update_ps_status(smb);
        if (ret < 0)
                goto fail;
 
-       ret = smb347_update_online(smb);
+       ret = smb347_start_stop_charging(smb);
 
 fail:
        smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
 static irqreturn_t smb347_interrupt(int irq, void *data)
 {
        struct smb347_charger *smb = data;
-       int stat_c, irqstat_e, irqstat_c;
-       irqreturn_t ret = IRQ_NONE;
+       unsigned int stat_c, irqstat_e, irqstat_c;
+       bool handled = false;
+       int ret;
 
-       stat_c = smb347_read(smb, STAT_C);
-       if (stat_c < 0) {
-               dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+       ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading STAT_C failed\n");
                return IRQ_NONE;
        }
 
-       irqstat_c = smb347_read(smb, IRQSTAT_C);
-       if (irqstat_c < 0) {
-               dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+       ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
                return IRQ_NONE;
        }
 
-       irqstat_e = smb347_read(smb, IRQSTAT_E);
-       if (irqstat_e < 0) {
-               dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+       ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
                return IRQ_NONE;
        }
 
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
         * disable charging.
         */
        if (stat_c & STAT_C_CHARGER_ERROR) {
-               dev_err(&smb->client->dev,
-                       "error in charger, disabling charging\n");
+               dev_err(smb->dev, "error in charger, disabling charging\n");
 
                smb347_charging_disable(smb);
                power_supply_changed(&smb->battery);
-
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
        /*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
        if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
                if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
                        power_supply_changed(&smb->battery);
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
        /*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
         * was connected or disconnected.
         */
        if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
-               if (smb347_update_status(smb) > 0) {
-                       smb347_update_online(smb);
-                       power_supply_changed(&smb->mains);
-                       power_supply_changed(&smb->usb);
+               if (smb347_update_ps_status(smb) > 0) {
+                       smb347_start_stop_charging(smb);
+                       if (smb->pdata->use_mains)
+                               power_supply_changed(&smb->mains);
+                       if (smb->pdata->use_usb)
+                               power_supply_changed(&smb->usb);
                }
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
-       return ret;
+       return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
         *      - termination current reached
         *      - charger error
         */
-       if (enable) {
-               ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_write(smb, CFG_STATUS_IRQ,
-                                  CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_read(smb, CFG_PIN);
-               if (ret < 0)
-                       goto fail;
-
-               ret |= CFG_PIN_EN_CHARGER_ERROR;
-
-               ret = smb347_write(smb, CFG_PIN, ret);
-       } else {
-               ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_read(smb, CFG_PIN);
-               if (ret < 0)
-                       goto fail;
-
-               ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+       ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+                                enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+       if (ret < 0)
+               goto fail;
 
-               ret = smb347_write(smb, CFG_PIN, ret);
-       }
+       ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+                       enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+       if (ret < 0)
+               goto fail;
 
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+                                enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
        smb347_set_writable(smb, false);
        return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
        return smb347_irq_set(smb, false);
 }
 
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+                          struct i2c_client *client)
 {
        const struct smb347_charger_platform_data *pdata = smb->pdata;
        int ret, irq = gpio_to_irq(pdata->irq_gpio);
 
-       ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+       ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
        if (ret < 0)
                goto fail;
 
        ret = request_threaded_irq(irq, NULL, smb347_interrupt,
-                                  IRQF_TRIGGER_FALLING, smb->client->name,
-                                  smb);
+                                  IRQF_TRIGGER_FALLING, client->name, smb);
        if (ret < 0)
                goto fail_gpio;
 
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
         * Configure the STAT output to be suitable for interrupts: disable
         * all other output (except interrupts) and make it active low.
         */
-       ret = smb347_read(smb, CFG_STAT);
-       if (ret < 0)
-               goto fail_readonly;
-
-       ret &= ~CFG_STAT_ACTIVE_HIGH;
-       ret |= CFG_STAT_DISABLED;
-
-       ret = smb347_write(smb, CFG_STAT, ret);
-       if (ret < 0)
-               goto fail_readonly;
-
-       ret = smb347_irq_enable(smb);
+       ret = regmap_update_bits(smb->regmap, CFG_STAT,
+                                CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+                                CFG_STAT_DISABLED);
        if (ret < 0)
                goto fail_readonly;
 
        smb347_set_writable(smb, false);
-       smb->client->irq = irq;
+       client->irq = irq;
        return 0;
 
 fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
 fail_gpio:
        gpio_free(pdata->irq_gpio);
 fail:
-       smb->client->irq = 0;
+       client->irq = 0;
        return ret;
 }
 
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
        const struct smb347_charger_platform_data *pdata = smb->pdata;
        int ret;
 
-       ret = smb347_update_status(smb);
+       ret = smb347_update_ps_status(smb);
        if (ret < 0)
                return ret;
 
        switch (prop) {
        case POWER_SUPPLY_PROP_STATUS:
-               if (!smb347_is_online(smb)) {
+               if (!smb347_is_ps_online(smb)) {
                        val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
                        break;
                }
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
                break;
 
        case POWER_SUPPLY_PROP_CHARGE_TYPE:
-               if (!smb347_is_online(smb))
+               if (!smb347_is_ps_online(smb))
                        return -ENODATA;
 
                /*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
                val->intval = pdata->battery_info.voltage_max_design;
                break;
 
-       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               if (!smb347_is_online(smb))
-                       return -ENODATA;
-               ret = smb347_read(smb, STAT_A);
-               if (ret < 0)
-                       return ret;
-
-               ret &= STAT_A_FLOAT_VOLTAGE_MASK;
-               if (ret > 0x3d)
-                       ret = 0x3d;
-
-               val->intval = 3500000 + ret * 20000;
-               break;
-
-       case POWER_SUPPLY_PROP_CURRENT_NOW:
-               if (!smb347_is_online(smb))
-                       return -ENODATA;
-
-               ret = smb347_read(smb, STAT_B);
-               if (ret < 0)
-                       return ret;
-
-               /*
-                * The current value is composition of FCC and PCC values
-                * and we can detect which table to use from bit 5.
-                */
-               if (ret & 0x20) {
-                       val->intval = hw_to_current(fcc_tbl,
-                                                   ARRAY_SIZE(fcc_tbl),
-                                                   ret & 7);
-               } else {
-                       ret >>= 3;
-                       val->intval = hw_to_current(pcc_tbl,
-                                                   ARRAY_SIZE(pcc_tbl),
-                                                   ret & 7);
-               }
-               break;
-
        case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
                val->intval = pdata->battery_info.charge_full_design;
                break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
        POWER_SUPPLY_PROP_TECHNOLOGY,
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-       POWER_SUPPLY_PROP_VOLTAGE_NOW,
-       POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
        POWER_SUPPLY_PROP_MODEL_NAME,
 };
 
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
 {
-       struct smb347_charger *smb = s->private;
-       int ret;
-       u8 reg;
-
-       seq_printf(s, "Control registers:\n");
-       seq_printf(s, "==================\n");
-       for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-       }
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Command registers:\n");
-       seq_printf(s, "==================\n");
-       ret = smb347_read(smb, CMD_A);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
-       ret = smb347_read(smb, CMD_B);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
-       ret = smb347_read(smb, CMD_C);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Interrupt status registers:\n");
-       seq_printf(s, "===========================\n");
-       for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-       }
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Status registers:\n");
-       seq_printf(s, "=================\n");
-       for (reg = STAT_A; reg <= STAT_E; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+       switch (reg) {
+       case IRQSTAT_A:
+       case IRQSTAT_C:
+       case IRQSTAT_E:
+       case IRQSTAT_F:
+       case STAT_A:
+       case STAT_B:
+       case STAT_C:
+       case STAT_E:
+               return true;
        }
 
-       return 0;
+       return false;
 }
 
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
 {
-       return single_open(file, smb347_debugfs_show, inode->i_private);
+       switch (reg) {
+       case CFG_CHARGE_CURRENT:
+       case CFG_CURRENT_LIMIT:
+       case CFG_FLOAT_VOLTAGE:
+       case CFG_STAT:
+       case CFG_PIN:
+       case CFG_THERM:
+       case CFG_SYSOK:
+       case CFG_OTHER:
+       case CFG_OTG:
+       case CFG_TEMP_LIMIT:
+       case CFG_FAULT_IRQ:
+       case CFG_STATUS_IRQ:
+       case CFG_ADDRESS:
+       case CMD_A:
+       case CMD_B:
+       case CMD_C:
+               return true;
+       }
+
+       return smb347_volatile_reg(dev, reg);
 }
 
-static const struct file_operations smb347_debugfs_fops = {
-       .open           = smb347_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct regmap_config smb347_regmap = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = SMB347_MAX_REGISTER,
+       .volatile_reg   = smb347_volatile_reg,
+       .readable_reg   = smb347_readable_reg,
 };
 
 static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
        i2c_set_clientdata(client, smb);
 
        mutex_init(&smb->lock);
-       smb->client = client;
+       smb->dev = &client->dev;
        smb->pdata = pdata;
 
+       smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+       if (IS_ERR(smb->regmap))
+               return PTR_ERR(smb->regmap);
+
        ret = smb347_hw_init(smb);
        if (ret < 0)
                return ret;
 
-       smb->mains.name = "smb347-mains";
-       smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
-       smb->mains.get_property = smb347_mains_get_property;
-       smb->mains.properties = smb347_mains_properties;
-       smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
-       smb->mains.supplied_to = battery;
-       smb->mains.num_supplicants = ARRAY_SIZE(battery);
-
-       smb->usb.name = "smb347-usb";
-       smb->usb.type = POWER_SUPPLY_TYPE_USB;
-       smb->usb.get_property = smb347_usb_get_property;
-       smb->usb.properties = smb347_usb_properties;
-       smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
-       smb->usb.supplied_to = battery;
-       smb->usb.num_supplicants = ARRAY_SIZE(battery);
+       if (smb->pdata->use_mains) {
+               smb->mains.name = "smb347-mains";
+               smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+               smb->mains.get_property = smb347_mains_get_property;
+               smb->mains.properties = smb347_mains_properties;
+               smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+               smb->mains.supplied_to = battery;
+               smb->mains.num_supplicants = ARRAY_SIZE(battery);
+               ret = power_supply_register(dev, &smb->mains);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (smb->pdata->use_usb) {
+               smb->usb.name = "smb347-usb";
+               smb->usb.type = POWER_SUPPLY_TYPE_USB;
+               smb->usb.get_property = smb347_usb_get_property;
+               smb->usb.properties = smb347_usb_properties;
+               smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+               smb->usb.supplied_to = battery;
+               smb->usb.num_supplicants = ARRAY_SIZE(battery);
+               ret = power_supply_register(dev, &smb->usb);
+               if (ret < 0) {
+                       if (smb->pdata->use_mains)
+                               power_supply_unregister(&smb->mains);
+                       return ret;
+               }
+       }
 
        smb->battery.name = "smb347-battery";
        smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
        smb->battery.properties = smb347_battery_properties;
        smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
 
-       ret = power_supply_register(dev, &smb->mains);
-       if (ret < 0)
-               return ret;
-
-       ret = power_supply_register(dev, &smb->usb);
-       if (ret < 0) {
-               power_supply_unregister(&smb->mains);
-               return ret;
-       }
 
        ret = power_supply_register(dev, &smb->battery);
        if (ret < 0) {
-               power_supply_unregister(&smb->usb);
-               power_supply_unregister(&smb->mains);
+               if (smb->pdata->use_usb)
+                       power_supply_unregister(&smb->usb);
+               if (smb->pdata->use_mains)
+                       power_supply_unregister(&smb->mains);
                return ret;
        }
 
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
         * interrupt support here.
         */
        if (pdata->irq_gpio >= 0) {
-               ret = smb347_irq_init(smb);
+               ret = smb347_irq_init(smb, client);
                if (ret < 0) {
                        dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
                        dev_warn(dev, "disabling IRQ support\n");
+               } else {
+                       smb347_irq_enable(smb);
                }
        }
 
-       smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
-                                         &smb347_debugfs_fops);
        return 0;
 }
 
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
 {
        struct smb347_charger *smb = i2c_get_clientdata(client);
 
-       if (!IS_ERR_OR_NULL(smb->dentry))
-               debugfs_remove(smb->dentry);
-
        if (client->irq) {
                smb347_irq_disable(smb);
                free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
        }
 
        power_supply_unregister(&smb->battery);
-       power_supply_unregister(&smb->usb);
-       power_supply_unregister(&smb->mains);
+       if (smb->pdata->use_usb)
+               power_supply_unregister(&smb->usb);
+       if (smb->pdata->use_mains)
+               power_supply_unregister(&smb->mains);
        return 0;
 }
 
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
        .id_table     = smb347_id,
 };
 
-static int __init smb347_init(void)
-{
-       return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
-       i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
 
 MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
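Most of the smb347-charger churn above is a mechanical conversion from open-coded i2c_smbus_read_byte_data()/i2c_smbus_write_byte_data() sequences to regmap: smb347_probe() now creates the map with devm_regmap_init_i2c(), readable_reg()/volatile_reg() describe which registers exist and which must always be read from the hardware, and each masked register update collapses into a single regmap_update_bits() call. A minimal sketch of that call pattern, with placeholder arguments rather than the driver's own register constants:

	/* Illustrative only: the call that replaces each read/modify/write
	 * block. "map" is the regmap created in smb347_probe(); reg, mask
	 * and val are placeholders, not constants from this driver. */
	static int example_masked_update(struct regmap *map, unsigned int reg,
					 unsigned int mask, unsigned int val)
	{
		/* regmap reads the register, applies the mask and writes the
		 * result back, skipping the write when nothing changed. */
		return regmap_update_bits(map, reg, mask, val & mask);
	}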
index bc871923879303f5771a27c83f7c148850682ddd..6194d35ebb9740c0af7f999dcb7b73aec75d91ca 100644 (file)
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
          ports for Input/Output direction to allow other traffic
          than Maintenance transfers.
 
+config RAPIDIO_DMA_ENGINE
+       bool "DMA Engine support for RapidIO"
+       depends on RAPIDIO
+       select DMADEVICES
+       select DMA_ENGINE
+       help
+         Say Y here if you want to use the DMA Engine framework for RapidIO
+         data transfers to/from target RIO devices. RapidIO uses NREAD and
+         NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+         memory and memory on a remote target device. You need a DMA
+         controller capable of performing data transfers to/from RapidIO.
+
+         If you are unsure about this, say Y here.
+
 config RAPIDIO_DEBUG
        bool "RapidIO subsystem debug messages"
        depends on RAPIDIO
index 3b7b4e2dff7c8a07dd159c85037302c16678b8bd..7b62860f34f805842ab9fbe28ea35a6909f029b3 100644 (file)
@@ -3,3 +3,6 @@
 #
 
 obj-$(CONFIG_RAPIDIO_TSI721)   += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721)   += tsi721_dma.o
+endif
index 30d2072f480b72947c74401d5522fbb9d697d313..722246cf20ab2ed592a6a5ed5435b0f1320a5a44 100644 (file)
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
                        u16 destid, u8 hopcount, u32 offset, int len,
                        u32 *data, int do_wr)
 {
+       void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
        struct tsi721_dma_desc *bd_ptr;
        u32 rd_count, swr_ptr, ch_stat;
        int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
        if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
                return -EINVAL;
 
-       bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+       bd_ptr = priv->mdma.bd_base;
 
-       rd_count = ioread32(
-                       priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+       rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
 
        /* Initialize DMA descriptor */
        bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
        mb();
 
        /* Start DMA operation */
-       iowrite32(rd_count + 2,
-               priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
-       ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+       iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+       ioread32(regs + TSI721_DMAC_DWRCNT);
        i = 0;
 
        /* Wait until DMA transfer is finished */
-       while ((ch_stat = ioread32(priv->regs +
-               TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+       while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+                                                       & TSI721_DMAC_STS_RUN) {
                udelay(1);
                if (++i >= 5000000) {
                        dev_dbg(&priv->pdev->dev,
                                "%s : DMA[%d] read timeout ch_status=%x\n",
-                               __func__, TSI721_DMACH_MAINT, ch_stat);
+                               __func__, priv->mdma.ch_id, ch_stat);
                        if (!do_wr)
                                *data = 0xffffffff;
                        err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
                        __func__, ch_stat);
                dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
                        do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
-               iowrite32(TSI721_DMAC_INT_ALL,
-                       priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
-               iowrite32(TSI721_DMAC_CTL_INIT,
-                       priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+               iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+               iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
                udelay(10);
-               iowrite32(0, priv->regs +
-                               TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+               iowrite32(0, regs + TSI721_DMAC_DWRCNT);
                udelay(1);
                if (!do_wr)
                        *data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
         * NOTE: Skipping check and clear FIFO entries because we are waiting
         * for transfer to be completed.
         */
-       swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
-       iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+       swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+       iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
 err_out:
 
        return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
                        tsi721_pw_handler(mport);
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+               int ch;
+
+               if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+                       dev_dbg(&priv->pdev->dev,
+                               "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+                       for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+                               if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+                                       continue;
+                               tsi721_bdma_handler(&priv->bdma[ch]);
+                       }
+               }
+       }
+#endif
        return IRQ_HANDLED;
 }
 
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
                priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
        iowrite32(TSI721_SR_CHINT_IDBQRCV,
                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
-       iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
-               priv->regs + TSI721_DEV_CHAN_INTE);
 
        /* Enable SRIO MAC interrupts */
        iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
                priv->regs + TSI721_RIO_EM_DEV_INT_EN);
 
+       /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+               (TSI721_INT_BDMA_CHAN_M &
+                ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+       intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+       iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
        if (priv->flags & TSI721_USING_MSIX)
                intr = TSI721_DEV_INT_SRIO;
        else
                intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
-                       TSI721_DEV_INT_SMSG_CH;
+                       TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
 
        iowrite32(intr, priv->regs + TSI721_DEV_INTE);
        ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
                                        TSI721_MSIX_OMSG_INT(i);
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       /*
+        * Initialize MSI-X entries for Block DMA Engine:
+        * this driver supports XXX DMA channels
+        * (one is reserved for SRIO maintenance transactions)
+        */
+       for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+               entries[TSI721_VECT_DMA0_DONE + i].entry =
+                                       TSI721_MSIX_DMACH_DONE(i);
+               entries[TSI721_VECT_DMA0_INT + i].entry =
+                                       TSI721_MSIX_DMACH_INT(i);
+       }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
        err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
        if (err) {
                if (err > 0)
                        dev_info(&priv->pdev->dev,
                                 "Only %d MSI-X vectors available, "
                                 "not using MSI-X\n", err);
+               else
+                       dev_err(&priv->pdev->dev,
+                               "Failed to enable MSI-X (err=%d)\n", err);
                return err;
        }
 
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
                         i, pci_name(priv->pdev));
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+               priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+                               entries[TSI721_VECT_DMA0_DONE + i].vector;
+               snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+                        IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+                        i, pci_name(priv->pdev));
+
+               priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+                               entries[TSI721_VECT_DMA0_INT + i].vector;
+               snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+                        IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+                        i, pci_name(priv->pdev));
+       }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
        return 0;
 }
 #endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
        priv->idb_base = NULL;
 }
 
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize the BDMA channel allocated for RapidIO maintenance read/write
+ * request generation.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
 {
        struct tsi721_dma_desc *bd_ptr;
        u64             *sts_ptr;
        dma_addr_t      bd_phys, sts_phys;
        int             sts_size;
-       int             bd_num = priv->bdma[chnum].bd_num;
+       int             bd_num = 2;
+       void __iomem    *regs;
 
-       dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+       dev_dbg(&priv->pdev->dev,
+               "Init Block DMA Engine for Maintenance requests, CH%d\n",
+               TSI721_DMACH_MAINT);
 
        /*
         * Initialize DMA channel for maintenance requests
         */
 
+       priv->mdma.ch_id = TSI721_DMACH_MAINT;
+       regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
        /* Allocate space for DMA descriptors */
        bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        if (!bd_ptr)
                return -ENOMEM;
 
-       priv->bdma[chnum].bd_phys = bd_phys;
-       priv->bdma[chnum].bd_base = bd_ptr;
+       priv->mdma.bd_num = bd_num;
+       priv->mdma.bd_phys = bd_phys;
+       priv->mdma.bd_base = bd_ptr;
 
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
                dma_free_coherent(&priv->pdev->dev,
                                  bd_num * sizeof(struct tsi721_dma_desc),
                                  bd_ptr, bd_phys);
-               priv->bdma[chnum].bd_base = NULL;
+               priv->mdma.bd_base = NULL;
                return -ENOMEM;
        }
 
-       priv->bdma[chnum].sts_phys = sts_phys;
-       priv->bdma[chnum].sts_base = sts_ptr;
-       priv->bdma[chnum].sts_size = sts_size;
+       priv->mdma.sts_phys = sts_phys;
+       priv->mdma.sts_base = sts_ptr;
+       priv->mdma.sts_size = sts_size;
 
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
 
        /* Setup DMA descriptor pointers */
-       iowrite32(((u64)bd_phys >> 32),
-               priv->regs + TSI721_DMAC_DPTRH(chnum));
+       iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
-               priv->regs + TSI721_DMAC_DPTRL(chnum));
+               regs + TSI721_DMAC_DPTRL);
 
        /* Setup descriptor status FIFO */
-       iowrite32(((u64)sts_phys >> 32),
-               priv->regs + TSI721_DMAC_DSBH(chnum));
+       iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
-               priv->regs + TSI721_DMAC_DSBL(chnum));
+               regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
-               priv->regs + TSI721_DMAC_DSSZ(chnum));
+               regs + TSI721_DMAC_DSSZ);
 
        /* Clear interrupt bits */
-       iowrite32(TSI721_DMAC_INT_ALL,
-               priv->regs + TSI721_DMAC_INT(chnum));
+       iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
 
-       ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+       ioread32(regs + TSI721_DMAC_INT);
 
        /* Toggle DMA channel initialization */
-       iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
-       ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+       iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+       ioread32(regs + TSI721_DMAC_CTL);
        udelay(10);
 
        return 0;
 }
 
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
 {
        u32 ch_stat;
+       struct tsi721_bdma_maint *mdma = &priv->mdma;
+       void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
 
-       if (priv->bdma[chnum].bd_base == NULL)
+       if (mdma->bd_base == NULL)
                return 0;
 
        /* Check if DMA channel still running */
-       ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+       ch_stat = ioread32(regs + TSI721_DMAC_STS);
        if (ch_stat & TSI721_DMAC_STS_RUN)
                return -EFAULT;
 
        /* Put DMA channel into init state */
-       iowrite32(TSI721_DMAC_CTL_INIT,
-               priv->regs + TSI721_DMAC_CTL(chnum));
+       iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
 
        /* Free space allocated for DMA descriptors */
        dma_free_coherent(&priv->pdev->dev,
-               priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
-               priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
-       priv->bdma[chnum].bd_base = NULL;
+               mdma->bd_num * sizeof(struct tsi721_dma_desc),
+               mdma->bd_base, mdma->bd_phys);
+       mdma->bd_base = NULL;
 
        /* Free space allocated for status FIFO */
        dma_free_coherent(&priv->pdev->dev,
-               priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
-               priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
-       priv->bdma[chnum].sts_base = NULL;
-       return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
-       /* Initialize BDMA channel allocated for RapidIO maintenance read/write
-        * request generation
-        */
-       priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
-       if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
-               dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
-                       " channel %d, aborting\n", TSI721_DMACH_MAINT);
-               return -ENOMEM;
-       }
-
+               mdma->sts_size * sizeof(struct tsi721_dma_sts),
+               mdma->sts_base, mdma->sts_phys);
+       mdma->sts_base = NULL;
        return 0;
 }
 
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
-       tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
 /* Enable Inbound Messaging Interrupts */
 static void
 tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
 
        /* Disable all BDMA Channel interrupts */
        for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
-               iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+               iowrite32(0,
+                       priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
 
        /* Disable all general BDMA interrupts */
        iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        mport->phy_type = RIO_PHY_SERIAL;
        mport->priv = (void *)priv;
        mport->phys_efptr = 0x100;
+       priv->mport = mport;
 
        INIT_LIST_HEAD(&mport->dbells);
 
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        if (!err) {
                tsi721_interrupts_init(priv);
                ops->pwenable = tsi721_pw_enable;
-       } else
+       } else {
                dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
                        "vector %02X err=0x%x\n", pdev->irq, err);
+               goto err_exit;
+       }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       tsi721_register_dma(priv);
+#endif
        /* Enable SRIO link */
        iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
                  TSI721_DEVCTL_SRBOOT_CMPL,
                  priv->regs + TSI721_DEVCTL);
 
        rio_register_mport(mport);
-       priv->mport = mport;
 
        if (mport->host_deviceid >= 0)
                iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
                iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
 
        return 0;
+
+err_exit:
+       kfree(mport);
+       kfree(ops);
+       return err;
 }
 
 static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
        tsi721_init_pc2sr_mapping(priv);
        tsi721_init_sr2pc_mapping(priv);
 
-       if (tsi721_bdma_init(priv)) {
+       if (tsi721_bdma_maint_init(priv)) {
                dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
                err = -ENOMEM;
                goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
 err_free_consistent:
        tsi721_doorbell_free(priv);
 err_free_bdma:
-       tsi721_bdma_free(priv);
+       tsi721_bdma_maint_free(priv);
 err_unmap_bars:
        if (priv->regs)
                iounmap(priv->regs);
index 1c226b31af13fc9e196d221a4e3557f91ae9ae6c..59de9d7be3460a08250bcd197dc858bed0af6dee 100644 (file)
 #define TSI721_DEV_INTE                0x29840
 #define TSI721_DEV_INT         0x29844
 #define TSI721_DEV_INTSET      0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH        0x00001000
 #define TSI721_DEV_INT_SMSG_CH 0x00000800
 #define TSI721_DEV_INT_SMSG_NCH        0x00000400
 #define TSI721_DEV_INT_SR2PC_CH        0x00000200
 #define TSI721_INT_IMSG_CHAN(x)        (1 << (16 + (x)))
 #define TSI721_INT_OMSG_CHAN_M 0x0000ff00
 #define TSI721_INT_OMSG_CHAN(x)        (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x)        (1 << (x))
 
 /*
  * PC2SR block registers
  *   x = 0..7
  */
 
-#define TSI721_DMAC_DWRCNT(x)  (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x)  (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x)    (0x51000 + (x) * 0x1000)
 
-#define TSI721_DMAC_CTL(x)     (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT     0x000
+#define TSI721_DMAC_DRDCNT     0x004
+
+#define TSI721_DMAC_CTL                0x008
 #define TSI721_DMAC_CTL_SUSP   0x00000002
 #define TSI721_DMAC_CTL_INIT   0x00000001
 
-#define TSI721_DMAC_INT(x)     (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT                0x00c
 #define TSI721_DMAC_INT_STFULL 0x00000010
 #define TSI721_DMAC_INT_DONE   0x00000008
 #define TSI721_DMAC_INT_SUSP   0x00000004
 #define TSI721_DMAC_INT_IOFDONE        0x00000001
 #define TSI721_DMAC_INT_ALL    0x0000001f
 
-#define TSI721_DMAC_INTSET(x)  (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET     0x010
 
-#define TSI721_DMAC_STS(x)     (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS                0x014
 #define TSI721_DMAC_STS_ABORT  0x00400000
 #define TSI721_DMAC_STS_RUN    0x00200000
 #define TSI721_DMAC_STS_CS     0x001f0000
 
-#define TSI721_DMAC_INTE(x)    (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE       0x018
 
-#define TSI721_DMAC_DPTRL(x)   (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL      0x024
 #define TSI721_DMAC_DPTRL_MASK 0xffffffe0
 
-#define TSI721_DMAC_DPTRH(x)   (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH      0x028
 
-#define TSI721_DMAC_DSBL(x)    (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL       0x02c
 #define TSI721_DMAC_DSBL_MASK  0xffffffc0
 
-#define TSI721_DMAC_DSBH(x)    (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH       0x030
 
-#define TSI721_DMAC_DSSZ(x)    (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ       0x034
 #define TSI721_DMAC_DSSZ_SIZE_M        0x0000000f
 #define TSI721_DMAC_DSSZ_SIZE(size)    (__fls(size) - 4)
 
-
-#define TSI721_DMAC_DSRP(x)    (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP       0x038
 #define TSI721_DMAC_DSRP_MASK  0x0007ffff
 
-#define TSI721_DMAC_DSWP(x)    (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP       0x03c
 #define TSI721_DMAC_DSWP_MASK  0x0007ffff
 
 #define TSI721_BDMA_INTE       0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
 #define TSI721_DMACH_MAINT     0       /* DMA channel for maint requests */
 #define TSI721_DMACH_MAINT_NBD 32      /* Number of BDs for maint requests */
 
+#define TSI721_DMACH_DMA       1       /* DMA channel for data transfers */
+
 #define MSG_DMA_ENTRY_INX_TO_SIZE(x)   ((0x10 << (x)) & 0xFFFF0)
 
 enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
 
 /* Structures */
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+       struct dma_async_tx_descriptor  txd;
+       struct tsi721_dma_desc          *hw_desc;
+       u16                             destid;
+       /* low 64 bits of the 66-bit RIO address */
+       u64                             rio_addr;
+       /* upper 2 bits of the 66-bit RIO address */
+       u8                              rio_addr_u;
+       bool                            interrupt;
+       struct list_head                desc_node;
+       struct list_head                tx_list;
+};
+
 struct tsi721_bdma_chan {
+       int             id;
+       void __iomem    *regs;
+       int             bd_num;         /* number of buffer descriptors */
+       void            *bd_base;       /* start of DMA descriptors */
+       dma_addr_t      bd_phys;
+       void            *sts_base;      /* start of DMA BD status FIFO */
+       dma_addr_t      sts_phys;
+       int             sts_size;
+       u32             sts_rdptr;
+       u32             wr_count;
+       u32             wr_count_next;
+
+       struct dma_chan         dchan;
+       struct tsi721_tx_desc   *tx_desc;
+       spinlock_t              lock;
+       struct list_head        active_list;
+       struct list_head        queue;
+       struct list_head        free_list;
+       dma_cookie_t            completed_cookie;
+       struct tasklet_struct   tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+       int             ch_id;          /* BDMA channel number */
        int             bd_num;         /* number of buffer descriptors */
        void            *bd_base;       /* start of DMA descriptors */
        dma_addr_t      bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
        TSI721_VECT_IMB1_INT,
        TSI721_VECT_IMB2_INT,
        TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       TSI721_VECT_DMA0_DONE,
+       TSI721_VECT_DMA1_DONE,
+       TSI721_VECT_DMA2_DONE,
+       TSI721_VECT_DMA3_DONE,
+       TSI721_VECT_DMA4_DONE,
+       TSI721_VECT_DMA5_DONE,
+       TSI721_VECT_DMA6_DONE,
+       TSI721_VECT_DMA7_DONE,
+       TSI721_VECT_DMA0_INT,
+       TSI721_VECT_DMA1_INT,
+       TSI721_VECT_DMA2_INT,
+       TSI721_VECT_DMA3_INT,
+       TSI721_VECT_DMA4_INT,
+       TSI721_VECT_DMA5_INT,
+       TSI721_VECT_DMA6_INT,
+       TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
        TSI721_VECT_MAX
 };
 
@@ -754,7 +820,11 @@ struct tsi721_device {
        u32             pw_discard_count;
 
        /* BDMA Engine */
+       struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
 
        /* Inbound Messaging */
        int             imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
        struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
 #endif
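The register definitions above replace the per-channel macros (TSI721_DMAC_CTL(x), TSI721_DMAC_INTE(x), ...) with a single block base, TSI721_DMAC_BASE(x), plus fixed offsets, which is what lets tsi721_bdma_maint_init() and the new tsi721_dma.c cache a per-channel regs pointer and address registers as regs + offset. A standalone sketch checking that the new form resolves to the same absolute offsets as the old macros (values copied from the hunk above):

    #include <assert.h>

    #define TSI721_DMAC_BASE(x)     (0x51000 + (x) * 0x1000)
    #define TSI721_DMAC_CTL         0x008
    #define TSI721_DMAC_INTE        0x018

    int main(void)
    {
            int ch;

            for (ch = 0; ch < 8; ch++) {
                    /* old TSI721_DMAC_CTL(ch)  was 0x51008 + ch * 0x1000 */
                    assert(TSI721_DMAC_BASE(ch) + TSI721_DMAC_CTL ==
                           0x51008 + ch * 0x1000);
                    /* old TSI721_DMAC_INTE(ch) was 0x51018 + ch * 0x1000 */
                    assert(TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE ==
                           0x51018 + ch * 0x1000);
            }
            return 0;
    }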
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644 (file)
index 0000000..92e06a5
--- /dev/null
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+       return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+       return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+                               struct tsi721_bdma_chan *bdma_chan)
+{
+       return list_first_entry(&bdma_chan->active_list,
+                               struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_dma_desc *bd_ptr;
+       struct device *dev = bdma_chan->dchan.device->dev;
+       u64             *sts_ptr;
+       dma_addr_t      bd_phys;
+       dma_addr_t      sts_phys;
+       int             sts_size;
+       int             bd_num = bdma_chan->bd_num;
+
+       dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+       /* Allocate space for DMA descriptors */
+       bd_ptr = dma_zalloc_coherent(dev,
+                               bd_num * sizeof(struct tsi721_dma_desc),
+                               &bd_phys, GFP_KERNEL);
+       if (!bd_ptr)
+               return -ENOMEM;
+
+       bdma_chan->bd_phys = bd_phys;
+       bdma_chan->bd_base = bd_ptr;
+
+       dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+               bd_ptr, (unsigned long long)bd_phys);
+
+       /* Allocate space for descriptor status FIFO */
+       sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+                                       bd_num : TSI721_DMA_MINSTSSZ;
+       sts_size = roundup_pow_of_two(sts_size);
+       sts_ptr = dma_zalloc_coherent(dev,
+                                    sts_size * sizeof(struct tsi721_dma_sts),
+                                    &sts_phys, GFP_KERNEL);
+       if (!sts_ptr) {
+               /* Free space allocated for DMA descriptors */
+               dma_free_coherent(dev,
+                                 bd_num * sizeof(struct tsi721_dma_desc),
+                                 bd_ptr, bd_phys);
+               bdma_chan->bd_base = NULL;
+               return -ENOMEM;
+       }
+
+       bdma_chan->sts_phys = sts_phys;
+       bdma_chan->sts_base = sts_ptr;
+       bdma_chan->sts_size = sts_size;
+
+       dev_dbg(dev,
+               "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+               sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+       /* Initialize DMA descriptors ring */
+       bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+       bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+                                                TSI721_DMAC_DPTRL_MASK);
+       bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+       /* Setup DMA descriptor pointers */
+       iowrite32(((u64)bd_phys >> 32),
+               bdma_chan->regs + TSI721_DMAC_DPTRH);
+       iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+               bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+       /* Setup descriptor status FIFO */
+       iowrite32(((u64)sts_phys >> 32),
+               bdma_chan->regs + TSI721_DMAC_DSBH);
+       iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+               bdma_chan->regs + TSI721_DMAC_DSBL);
+       iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+               bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+       /* Clear interrupt bits */
+       iowrite32(TSI721_DMAC_INT_ALL,
+               bdma_chan->regs + TSI721_DMAC_INT);
+
+       ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+       /* Toggle DMA channel initialization */
+       iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+       ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+       bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+       bdma_chan->sts_rdptr = 0;
+       udelay(10);
+
+       return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 ch_stat;
+
+       if (bdma_chan->bd_base == NULL)
+               return 0;
+
+       /* Check if DMA channel still running */
+       ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+       if (ch_stat & TSI721_DMAC_STS_RUN)
+               return -EFAULT;
+
+       /* Put DMA channel into init state */
+       iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+       /* Free space allocated for DMA descriptors */
+       dma_free_coherent(bdma_chan->dchan.device->dev,
+               bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+               bdma_chan->bd_base, bdma_chan->bd_phys);
+       bdma_chan->bd_base = NULL;
+
+       /* Free space allocated for status FIFO */
+       dma_free_coherent(bdma_chan->dchan.device->dev,
+               bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+               bdma_chan->sts_base, bdma_chan->sts_phys);
+       bdma_chan->sts_base = NULL;
+       return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+       if (enable) {
+               /* Clear pending BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INT);
+               ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+               /* Enable BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INTE);
+       } else {
+               /* Disable BDMA channel interrupts */
+               iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+               /* Clear pending BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INT);
+       }
+
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 sts;
+
+       sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+       return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+       /* Disable BDMA channel interrupts */
+       iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+       tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+       struct tsi721_bdma_chan *bdma_chan = ptr;
+
+       tsi721_bdma_handler(bdma_chan);
+       return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+       if (!tsi721_dma_is_idle(bdma_chan)) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "BUG: Attempt to start non-idle channel\n");
+               return;
+       }
+
+       if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "BUG: Attempt to start DMA with no BDs ready\n");
+               return;
+       }
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "tx_chan: %p, chan: %d, regs: %p\n",
+               bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+       iowrite32(bdma_chan->wr_count_next,
+               bdma_chan->regs + TSI721_DMAC_DWRCNT);
+       ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+       bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+                           struct tsi721_tx_desc *desc)
+{
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "Put desc: %p into free list\n", desc);
+
+       if (desc) {
+               spin_lock_bh(&bdma_chan->lock);
+               list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+               list_add(&desc->desc_node, &bdma_chan->free_list);
+               bdma_chan->wr_count_next = bdma_chan->wr_count;
+               spin_unlock_bh(&bdma_chan->lock);
+       }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_tx_desc *tx_desc, *_tx_desc;
+       struct tsi721_tx_desc *ret = NULL;
+       int i;
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_for_each_entry_safe(tx_desc, _tx_desc,
+                                &bdma_chan->free_list, desc_node) {
+               if (async_tx_test_ack(&tx_desc->txd)) {
+                       list_del(&tx_desc->desc_node);
+                       ret = tx_desc;
+                       break;
+               }
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "desc %p not ACKed\n", tx_desc);
+       }
+
+       i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+       if (i == bdma_chan->bd_num - 1) {
+               i = 0;
+               bdma_chan->wr_count_next++; /* skip link descriptor */
+       }
+
+       bdma_chan->wr_count_next++;
+       tx_desc->txd.phys = bdma_chan->bd_phys +
+                               i * sizeof(struct tsi721_dma_desc);
+       tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+
+       spin_unlock_bh(&bdma_chan->lock);
+
+       return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+       struct tsi721_tx_desc *desc, struct scatterlist *sg,
+       enum dma_rtype rtype, u32 sys_size)
+{
+       struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+       u64 rio_addr;
+
+       if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "SG element is too large\n");
+               return -EINVAL;
+       }
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+               (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+               sg_dma_len(sg));
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "bd_ptr = %p did=%d raddr=0x%llx\n",
+               bd_ptr, desc->destid, desc->rio_addr);
+
+       /* Initialize DMA descriptor */
+       bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+                                       (rtype << 19) | desc->destid);
+       if (desc->interrupt)
+               bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+       bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+                                       (sys_size << 26) | sg_dma_len(sg));
+       rio_addr = (desc->rio_addr >> 2) |
+                               ((u64)(desc->rio_addr_u & 0x3) << 62);
+       bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+       bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+       bd_ptr->t1.bufptr_lo = cpu_to_le32(
+                                       (u64)sg_dma_address(sg) & 0xffffffff);
+       bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+       bd_ptr->t1.s_dist = 0;
+       bd_ptr->t1.s_size = 0;
+
+       return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+                                     struct tsi721_tx_desc *desc)
+{
+       struct dma_async_tx_descriptor *txd = &desc->txd;
+       dma_async_tx_callback callback = txd->callback;
+       void *param = txd->callback_param;
+
+       list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+       list_move(&desc->desc_node, &bdma_chan->free_list);
+       bdma_chan->completed_cookie = txd->cookie;
+
+       if (callback)
+               callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_tx_desc *desc, *_d;
+       LIST_HEAD(list);
+
+       BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+       if (!list_empty(&bdma_chan->queue))
+               tsi721_start_dma(bdma_chan);
+
+       list_splice_init(&bdma_chan->active_list, &list);
+       list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+       list_for_each_entry_safe(desc, _d, &list, desc_node)
+               tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 srd_ptr;
+       u64 *sts_ptr;
+       int i, j;
+
+       /* Check and clear descriptor status FIFO entries */
+       srd_ptr = bdma_chan->sts_rdptr;
+       sts_ptr = bdma_chan->sts_base;
+       j = srd_ptr * 8;
+       while (sts_ptr[j]) {
+               for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+                       sts_ptr[j] = 0;
+
+               ++srd_ptr;
+               srd_ptr %= bdma_chan->sts_size;
+               j = srd_ptr * 8;
+       }
+
+       iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+       bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+       if (list_empty(&bdma_chan->active_list) ||
+               list_is_singular(&bdma_chan->active_list)) {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: Active_list empty\n", __func__);
+               tsi721_dma_complete_all(bdma_chan);
+       } else {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: Active_list NOT empty\n", __func__);
+               tsi721_dma_chain_complete(bdma_chan,
+                                       tsi721_dma_first_active(bdma_chan));
+               tsi721_start_dma(bdma_chan);
+       }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+       struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+       u32 dmac_int, dmac_sts;
+
+       dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+       dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+               __func__, bdma_chan->id, dmac_int);
+       /* Clear channel interrupts */
+       iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+       if (dmac_int & TSI721_DMAC_INT_ERR) {
+               dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+               dev_err(bdma_chan->dchan.device->dev,
+                       "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+                       __func__, bdma_chan->id, dmac_sts);
+       }
+
+       if (dmac_int & TSI721_DMAC_INT_STFULL) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "%s: DMAC%d descriptor status FIFO is full\n",
+                       __func__, bdma_chan->id);
+       }
+
+       if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+               tsi721_clr_stat(bdma_chan);
+               spin_lock(&bdma_chan->lock);
+               tsi721_advance_work(bdma_chan);
+               spin_unlock(&bdma_chan->lock);
+       }
+
+       /* Re-Enable BDMA channel interrupts */
+       iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+       struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+       dma_cookie_t cookie;
+
+       spin_lock_bh(&bdma_chan->lock);
+
+       cookie = txd->chan->cookie;
+       if (++cookie < 0)
+               cookie = 1;
+       txd->chan->cookie = cookie;
+       txd->cookie = cookie;
+
+       if (list_empty(&bdma_chan->active_list)) {
+               list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+               tsi721_start_dma(bdma_chan);
+       } else {
+               list_add_tail(&desc->desc_node, &bdma_chan->queue);
+       }
+
+       spin_unlock_bh(&bdma_chan->lock);
+       return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+       struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+       struct tsi721_tx_desc *desc = NULL;
+       LIST_HEAD(tmp_list);
+       int i;
+       int rc;
+
+       if (bdma_chan->bd_base)
+               return bdma_chan->bd_num - 1;
+
+       /* Initialize BDMA channel */
+       if (tsi721_bdma_ch_init(bdma_chan)) {
+               dev_err(dchan->device->dev, "Unable to initialize data DMA"
+                       " channel %d, aborting\n", bdma_chan->id);
+               return -ENOMEM;
+       }
+
+       /* Allocate matching number of logical descriptors */
+       desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+                       GFP_KERNEL);
+       if (!desc) {
+               dev_err(dchan->device->dev,
+                       "Failed to allocate logical descriptors\n");
+               rc = -ENOMEM;
+               goto err_out;
+       }
+
+       bdma_chan->tx_desc = desc;
+
+       for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+               dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+               desc[i].txd.tx_submit = tsi721_tx_submit;
+               desc[i].txd.flags = DMA_CTRL_ACK;
+               INIT_LIST_HEAD(&desc[i].tx_list);
+               list_add_tail(&desc[i].desc_node, &tmp_list);
+       }
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_splice(&tmp_list, &bdma_chan->free_list);
+       bdma_chan->completed_cookie = dchan->cookie = 1;
+       spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               /* Request interrupt service if we are in MSI-X mode */
+               rc = request_irq(
+                       priv->msix[TSI721_VECT_DMA0_DONE +
+                                  bdma_chan->id].vector,
+                       tsi721_bdma_msix, 0,
+                       priv->msix[TSI721_VECT_DMA0_DONE +
+                                  bdma_chan->id].irq_name,
+                       (void *)bdma_chan);
+
+               if (rc) {
+                       dev_dbg(dchan->device->dev,
+                               "Unable to allocate MSI-X interrupt for "
+                               "BDMA%d-DONE\n", bdma_chan->id);
+                       goto err_out;
+               }
+
+               rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                           bdma_chan->id].vector,
+                               tsi721_bdma_msix, 0,
+                               priv->msix[TSI721_VECT_DMA0_INT +
+                                          bdma_chan->id].irq_name,
+                               (void *)bdma_chan);
+
+               if (rc) {
+                       dev_dbg(dchan->device->dev,
+                               "Unable to allocate MSI-X interrupt for "
+                               "BDMA%d-INT\n", bdma_chan->id);
+                       free_irq(
+                               priv->msix[TSI721_VECT_DMA0_DONE +
+                                          bdma_chan->id].vector,
+                               (void *)bdma_chan);
+                       rc = -EIO;
+                       goto err_out;
+               }
+       }
+#endif /* CONFIG_PCI_MSI */
+
+       tasklet_enable(&bdma_chan->tasklet);
+       tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+       return bdma_chan->bd_num - 1;
+
+err_out:
+       kfree(desc);
+       tsi721_bdma_ch_free(bdma_chan);
+       return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+       struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+       LIST_HEAD(list);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       if (bdma_chan->bd_base == NULL)
+               return;
+
+       BUG_ON(!list_empty(&bdma_chan->active_list));
+       BUG_ON(!list_empty(&bdma_chan->queue));
+
+       tasklet_disable(&bdma_chan->tasklet);
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_splice_init(&bdma_chan->free_list, &list);
+       spin_unlock_bh(&bdma_chan->lock);
+
+       tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+                                   bdma_chan->id].vector, (void *)bdma_chan);
+               free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                   bdma_chan->id].vector, (void *)bdma_chan);
+       }
+#endif /* CONFIG_PCI_MSI */
+
+       tsi721_bdma_ch_free(bdma_chan);
+       kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+                                struct dma_tx_state *txstate)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       dma_cookie_t            last_used;
+       dma_cookie_t            last_completed;
+       int                     ret;
+
+       spin_lock_bh(&bdma_chan->lock);
+       last_completed = bdma_chan->completed_cookie;
+       last_used = dchan->cookie;
+       spin_unlock_bh(&bdma_chan->lock);
+
+       ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+       dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+       dev_dbg(dchan->device->dev,
+               "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+               __func__, ret, last_completed, last_used);
+
+       return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       if (tsi721_dma_is_idle(bdma_chan)) {
+               spin_lock_bh(&bdma_chan->lock);
+               tsi721_advance_work(bdma_chan);
+               spin_unlock_bh(&bdma_chan->lock);
+       } else
+               dev_dbg(dchan->device->dev,
+                       "%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+                       struct scatterlist *sgl, unsigned int sg_len,
+                       enum dma_transfer_direction dir, unsigned long flags,
+                       void *tinfo)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       struct tsi721_tx_desc *desc = NULL;
+       struct tsi721_tx_desc *first = NULL;
+       struct scatterlist *sg;
+       struct rio_dma_ext *rext = tinfo;
+       u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+       unsigned int i;
+       u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+       enum dma_rtype rtype;
+
+       if (!sgl || !sg_len) {
+               dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+               return NULL;
+       }
+
+       if (dir == DMA_DEV_TO_MEM)
+               rtype = NREAD;
+       else if (dir == DMA_MEM_TO_DEV) {
+               switch (rext->wr_type) {
+               case RDW_ALL_NWRITE:
+                       rtype = ALL_NWRITE;
+                       break;
+               case RDW_ALL_NWRITE_R:
+                       rtype = ALL_NWRITE_R;
+                       break;
+               case RDW_LAST_NWRITE_R:
+               default:
+                       rtype = LAST_NWRITE_R;
+                       break;
+               }
+       } else {
+               dev_err(dchan->device->dev,
+                       "%s: Unsupported DMA direction option\n", __func__);
+               return NULL;
+       }
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               int err;
+
+               dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+               desc = tsi721_desc_get(bdma_chan);
+               if (!desc) {
+                       dev_err(dchan->device->dev,
+                               "Not enough descriptors available\n");
+                       goto err_desc_get;
+               }
+
+               if (sg_is_last(sg))
+                       desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+               else
+                       desc->interrupt = false;
+
+               desc->destid = rext->destid;
+               desc->rio_addr = rio_addr;
+               desc->rio_addr_u = 0;
+
+               err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+               if (err) {
+                       dev_err(dchan->device->dev,
+                               "Failed to build desc: %d\n", err);
+                       goto err_desc_get;
+               }
+
+               rio_addr += sg_dma_len(sg);
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->desc_node, &first->tx_list);
+       }
+
+       first->txd.cookie = -EBUSY;
+       desc->txd.flags = flags;
+
+       return &first->txd;
+
+err_desc_get:
+       tsi721_desc_put(bdma_chan, first);
+       return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+                            unsigned long arg)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       struct tsi721_tx_desc *desc, *_d;
+       LIST_HEAD(list);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
+       spin_lock_bh(&bdma_chan->lock);
+
+       /* make sure to stop the transfer */
+       iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+       list_splice_init(&bdma_chan->active_list, &list);
+       list_splice_init(&bdma_chan->queue, &list);
+
+       list_for_each_entry_safe(desc, _d, &list, desc_node)
+               tsi721_dma_chain_complete(bdma_chan, desc);
+
+       spin_unlock_bh(&bdma_chan->lock);
+
+       return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+       int i;
+       int nr_channels = TSI721_DMA_MAXCH;
+       int err;
+       struct rio_mport *mport = priv->mport;
+
+       mport->dma.dev = &priv->pdev->dev;
+       mport->dma.chancnt = nr_channels;
+
+       INIT_LIST_HEAD(&mport->dma.channels);
+
+       for (i = 0; i < nr_channels; i++) {
+               struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+               if (i == TSI721_DMACH_MAINT)
+                       continue;
+
+               bdma_chan->bd_num = 64;
+               bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+               bdma_chan->dchan.device = &mport->dma;
+               bdma_chan->dchan.cookie = 1;
+               bdma_chan->dchan.chan_id = i;
+               bdma_chan->id = i;
+
+               spin_lock_init(&bdma_chan->lock);
+
+               INIT_LIST_HEAD(&bdma_chan->active_list);
+               INIT_LIST_HEAD(&bdma_chan->queue);
+               INIT_LIST_HEAD(&bdma_chan->free_list);
+
+               tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+                            (unsigned long)bdma_chan);
+               tasklet_disable(&bdma_chan->tasklet);
+               list_add_tail(&bdma_chan->dchan.device_node,
+                             &mport->dma.channels);
+       }
+
+       dma_cap_zero(mport->dma.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+       dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+       mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+       mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+       mport->dma.device_tx_status = tsi721_tx_status;
+       mport->dma.device_issue_pending = tsi721_issue_pending;
+       mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+       mport->dma.device_control = tsi721_device_control;
+
+       err = dma_async_device_register(&mport->dma);
+       if (err)
+               dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+       return err;
+}
index 86c9a091a2ffdbfb3cac0868f17824ee9f4e5b6d..c40665a4fa3347a8b9bdb1edd12c4b48eacb5d43 100644 (file)
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
        return 0;
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+       struct rio_dev *rdev = arg;
+
+       /* Check that DMA device belongs to the right MPORT */
+       return (rdev->net->hport ==
+               container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ *   specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+       dma_cap_mask_t mask;
+       struct dma_chan *dchan;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+       return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+       dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ *   for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+       struct dma_chan *dchan, struct rio_dma_data *data,
+       enum dma_transfer_direction direction, unsigned long flags)
+{
+       struct dma_async_tx_descriptor *txd = NULL;
+       struct rio_dma_ext rio_ext;
+
+       if (dchan->device->device_prep_slave_sg == NULL) {
+               pr_err("%s: prep_rio_sg == NULL\n", __func__);
+               return NULL;
+       }
+
+       rio_ext.destid = rdev->destid;
+       rio_ext.rio_addr_u = data->rio_addr_u;
+       rio_ext.rio_addr = data->rio_addr;
+       rio_ext.wr_type = data->wr_type;
+
+       txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+                                       direction, flags, &rio_ext);
+
+       return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
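rio_request_dma(), rio_dma_prep_slave_sg() and rio_release_dma() above are the client-facing side of the DMA support registered earlier by tsi721_register_dma(). A minimal usage sketch for a RapidIO device driver follows; the function name, the already DMA-mapped scatterlist and the completion handling are assumptions, only the rio_* calls and the rio_dma_data fields come from this patch:

    static int example_rio_nwrite(struct rio_dev *rdev, struct scatterlist *sgl,
                                  unsigned int nents, u64 rio_addr)
    {
            struct dma_async_tx_descriptor *txd;
            struct rio_dma_data data;
            struct dma_chan *dchan;

            dchan = rio_request_dma(rdev);          /* channel on rdev's mport */
            if (!dchan)
                    return -ENODEV;

            data.sg = sgl;                          /* mapped for the mport's PCI dev */
            data.sg_len = nents;
            data.rio_addr = rio_addr;               /* low 64 bits of 66-bit address */
            data.rio_addr_u = 0;
            data.wr_type = RDW_ALL_NWRITE_R;

            txd = rio_dma_prep_slave_sg(rdev, dchan, &data,
                                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!txd) {
                    rio_release_dma(dchan);
                    return -EIO;
            }

            dmaengine_submit(txd);
            dma_async_issue_pending(dchan);
            /* ... wait for the descriptor callback or poll the cookie ... */

            rio_release_dma(dchan);
            return 0;
    }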
index e1b8c54ace5a2a68e0d4f3ee28f36ff626f06645..a739f5ca936a878481f408d67e0a7edf8688a42e 100644 (file)
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match ab8500_regulator_matches[] = {
-       { .name = "LDO-AUX1",    .driver_data = (void *) AB8500_LDO_AUX1, },
-       { .name = "LDO-AUX2",    .driver_data = (void *) AB8500_LDO_AUX2, },
-       { .name = "LDO-AUX3",    .driver_data = (void *) AB8500_LDO_AUX3, },
-       { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
-       { .name = "LDO-TVOUT",   .driver_data = (void *) AB8500_LDO_TVOUT, },
-       { .name = "LDO-USB",     .driver_data = (void *) AB8500_LDO_USB, },
-       { .name = "LDO-AUDIO",   .driver_data = (void *) AB8500_LDO_AUDIO, },
-       { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
-       { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
-       { .name = "LDO-DMIC",    .driver_data = (void *) AB8500_LDO_DMIC, },
-       { .name = "LDO-ANA",     .driver_data = (void *) AB8500_LDO_ANA, },
+       { .name = "ab8500_ldo_aux1",    .driver_data = (void *) AB8500_LDO_AUX1, },
+       { .name = "ab8500_ldo_aux2",    .driver_data = (void *) AB8500_LDO_AUX2, },
+       { .name = "ab8500_ldo_aux3",    .driver_data = (void *) AB8500_LDO_AUX3, },
+       { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+       { .name = "ab8500_ldo_tvout",   .driver_data = (void *) AB8500_LDO_TVOUT, },
+       { .name = "ab8500_ldo_usb",     .driver_data = (void *) AB8500_LDO_USB, },
+       { .name = "ab8500_ldo_audio",   .driver_data = (void *) AB8500_LDO_AUDIO, },
+       { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+       { .name = "ab8500_ldo_anamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+       { .name = "ab8500_ldo_dmic",    .driver_data = (void *) AB8500_LDO_DMIC, },
+       { .name = "ab8500_ldo_ana",     .driver_data = (void *) AB8500_LDO_ANA, },
 };
 
 static __devinit int
index 3660bace123c97adc3f3ce4e1165216655578a3d..e82e7eaac0f18fd2a50ba3d5506c5620e3e50183 100644 (file)
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
                .of_match_table = of_anatop_regulator_match_tbl,
        },
        .probe  = anatop_regulator_probe,
-       .remove = anatop_regulator_remove,
+       .remove = __devexit_p(anatop_regulator_remove),
 };
 
 static int __init anatop_regulator_init(void)
index 7584a74eec8a4f7706a59e9013e40bab34d30ef8..8b4b3829d9e719f1acb57ef0d5b158beeefe914d 100644 (file)
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
+       if (min_uV < rdev->desc->min_uV)
+               min_uV = rdev->desc->min_uV;
+
        ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
        if (ret < 0)
                return ret;
@@ -2516,9 +2519,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
        struct regulator *consumer;
-       int ret, output_uV, input_uV, total_uA_load = 0;
+       int ret, output_uV, input_uV = 0, total_uA_load = 0;
        unsigned int mode;
 
+       if (rdev->supply)
+               input_uV = regulator_get_voltage(rdev->supply);
+
        mutex_lock(&rdev->mutex);
 
        /*
@@ -2551,10 +2557,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
                goto out;
        }
 
-       /* get input voltage */
-       input_uV = 0;
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       /* No supply? Use constraint voltage */
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
        if (input_uV <= 0) {
index 968f97f3cb3d5ec8cc299a55aafbdd4bcc09e705..9dbb491b6efa827a64198baccfc42f6fe6612f79 100644 (file)
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match db8500_regulator_matches[] = {
-       { .name = "db8500-vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
-       { .name = "db8500-varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
-       { .name = "db8500-vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
-       { .name = "db8500-vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
-       { .name = "db8500-vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
-       { .name = "db8500-vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
-       { .name = "db8500-vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
-       { .name = "db8500-vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
-       { .name = "db8500-sva-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
-       { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
-       { .name = "db8500-sva-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
-       { .name = "db8500-sia-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
-       { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
-       { .name = "db8500-sia-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
-       { .name = "db8500-sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
-       { .name = "db8500-b2r2-mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
-       { .name = "db8500-esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
-       { .name = "db8500-esram12-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
-       { .name = "db8500-esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
-       { .name = "db8500-esram34-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+       { .name = "db8500_vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+       { .name = "db8500_varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
+       { .name = "db8500_vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+       { .name = "db8500_vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+       { .name = "db8500_vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+       { .name = "db8500_vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+       { .name = "db8500_vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+       { .name = "db8500_vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+       { .name = "db8500_sva_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+       { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+       { .name = "db8500_sva_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+       { .name = "db8500_sia_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+       { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+       { .name = "db8500_sia_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+       { .name = "db8500_sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+       { .name = "db8500_b2r2_mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+       { .name = "db8500_esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+       { .name = "db8500_esram12_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+       { .name = "db8500_esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+       { .name = "db8500_esram34_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
 };
 
 static __devinit int
index 9997d7aaca84294d03462f428b606d93de39a9df..242851a4c1a606d69b41efad88f74dc1060817db 100644 (file)
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
 }
 
 static int gpio_regulator_set_value(struct regulator_dev *dev,
-                                       int min, int max)
+                                       int min, int max, unsigned *selector)
 {
        struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-       int ptr, target, state, best_val = INT_MAX;
+       int ptr, target = 0, state, best_val = INT_MAX;
 
        for (ptr = 0; ptr < data->nr_states; ptr++)
                if (data->states[ptr].value < best_val &&
                    data->states[ptr].value >= min &&
-                   data->states[ptr].value <= max)
+                   data->states[ptr].value <= max) {
                        target = data->states[ptr].gpios;
+                       best_val = data->states[ptr].value;
+                       if (selector)
+                               *selector = ptr;
+               }
 
        if (best_val == INT_MAX)
                return -EINVAL;
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
                                        int min_uV, int max_uV,
                                        unsigned *selector)
 {
-       return gpio_regulator_set_value(dev, min_uV, max_uV);
+       return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
 }
 
 static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
 static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
                                        int min_uA, int max_uA)
 {
-       return gpio_regulator_set_value(dev, min_uA, max_uA);
+       return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
 }
 
 static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
 
        cfg.dev = &pdev->dev;
        cfg.init_data = config->init_data;
-       cfg.driver_data = &drvdata;
+       cfg.driver_data = drvdata;
 
        drvdata->dev = regulator_register(&drvdata->desc, &cfg);
        if (IS_ERR(drvdata->dev)) {
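The first hunk of this file makes the selection loop actually remember the best candidate (the loop never updated best_val before, so the in-range check below it could never succeed) and report the chosen state index through the new selector argument. A stripped-down user-space sketch of that search, with made-up state values:

#include <limits.h>
#include <stdio.h>

struct state { int value; int gpios; };

/* Pick the lowest value inside [min, max]; report its index via *selector. */
static int set_value(const struct state *states, int nr, int min, int max,
                     unsigned *selector)
{
        int i, target = 0, best_val = INT_MAX;

        for (i = 0; i < nr; i++)
                if (states[i].value < best_val &&
                    states[i].value >= min && states[i].value <= max) {
                        target = states[i].gpios;
                        best_val = states[i].value;
                        if (selector)
                                *selector = i;
                }

        if (best_val == INT_MAX)
                return -1;      /* nothing in range */
        return target;
}

int main(void)
{
        const struct state s[] = { { 1200, 0x0 }, { 1800, 0x1 }, { 3300, 0x3 } };
        unsigned sel = 0;
        int gpios = set_value(s, 3, 1500, 2000, &sel);

        printf("gpios=%#x selector=%u\n", gpios, sel);  /* gpios=0x1 selector=1 */
        return 0;
}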
index 1f4bb80457b3a8a153a4ae9c44f46196ec8733db..9d540cd02dab5f4de264dc35599322b773d6c484 100644 (file)
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
        config.dev = &client->dev;
        config.init_data = pdata->regulator;
        config.driver_data = info;
+       config.regmap = info->regmap;
 
        info->regulator = regulator_register(&dcdc_desc, &config);
        if (IS_ERR(info->regulator)) {
index c4435f608df7e2210dedbbaa3e3bfd0ac2f63de9..795f75a6ac3342537a18600e2d0414b1f011caaf 100644 (file)
@@ -673,7 +673,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
                        pmic->desc[id].ops = &palmas_ops_smps10;
                        pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
                        pmic->desc[id].vsel_mask = SMPS10_VSEL;
-                       pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+                       pmic->desc[id].enable_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                                       PALMAS_SMPS10_STATUS);
                        pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
                }
 
@@ -739,7 +741,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
 
                pmic->desc[id].type = REGULATOR_VOLTAGE;
                pmic->desc[id].owner = THIS_MODULE;
-               pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+               pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+                                               palmas_regs_info[id].ctrl_addr);
                pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
 
                if (pdata && pdata->reg_data)
@@ -775,9 +778,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
 err_unregister_regulator:
        while (--id >= 0)
                regulator_unregister(pmic->rdev[id]);
-       kfree(pmic->rdev);
-       kfree(pmic->desc);
-       kfree(pmic);
        return ret;
 }
 
@@ -788,10 +788,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
 
        for (id = 0; id < PALMAS_NUM_REGS; id++)
                regulator_unregister(pmic->rdev[id]);
-
-       kfree(pmic->rdev);
-       kfree(pmic->desc);
-       kfree(pmic);
        return 0;
 }
 
index 290d6fc01029a92cbb4a9e7a4783d6cebaa0da02..9caadb4821786308aa624e9ef03cd7408dc2083f 100644 (file)
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
 
        desc = reg_voltage_map[reg_id];
 
-       if (old_sel < new_sel)
+       if ((old_sel < new_sel) && s5m8767->ramp_delay)
                return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
                                        s5m8767->ramp_delay * 1000);
        return 0;
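The added condition above skips the settle-time calculation when the configured ramp delay is zero, which would otherwise divide by zero inside DIV_ROUND_UP. A self-contained sketch of the calculation, with illustrative step and ramp values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* microseconds needed to ramp from old_sel to new_sel; 0 if no ramp applies */
static int voltage_time(int step_uV, int ramp_mV_per_us, int old_sel, int new_sel)
{
        if (old_sel < new_sel && ramp_mV_per_us)
                return DIV_ROUND_UP(step_uV * (new_sel - old_sel),
                                    ramp_mV_per_us * 1000);
        return 0;
}

int main(void)
{
        printf("%d us\n", voltage_time(12500, 25, 10, 20)); /* 5 us */
        printf("%d us\n", voltage_time(12500, 0, 10, 20));  /* 0: ramp disabled */
        return 0;
}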
index f841bd0db6aabe523d5c4c8089743c448c72138f..8f1be8586c724a99061092d787273573eda5644e 100644 (file)
@@ -71,7 +71,7 @@
 
 /* LDO_CTRL bitfields */
 #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id)   ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x07 << ((ldo_id)*4))
 
 /* Number of step-down converters available */
 #define TPS65023_NUM_DCDC              3
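The mask change above narrows each LDO control field from four bits to three while the per-LDO shift stays at four. A quick user-space sketch of why the field width matters when fields are packed this way (register value is illustrative):

#include <stdio.h>

#define LDO_SHIFT(id)   ((id) * 4)
#define LDO_MASK3(id)   (0x07 << LDO_SHIFT(id)) /* 3-bit field */
#define LDO_MASK4(id)   (0x0F << LDO_SHIFT(id)) /* one bit too wide */

int main(void)
{
        unsigned reg = 0xFF;    /* both nibbles currently all-ones */

        printf("%#04x\n", reg & ~LDO_MASK3(0)); /* 0xf8: only the field cleared */
        printf("%#04x\n", reg & ~LDO_MASK4(0)); /* 0xf0: bit 3, outside the field, cleared too */
        return 0;
}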
index b88b3df82381dc15bb81288c32874480a3eaad94..1b299aacf22ffa2023652c5a45cf61ac1a1c1416 100644 (file)
@@ -482,7 +482,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
        info    = &supply_info[rdev_get_id(rdev)];
 
        if (info->flags & FIXED_VOLTAGE)
-               return info->fixed_voltage;
+               return 0;
 
        ret = read_field(hw, &info->voltage);
        if (ret < 0)
index 24d880e78ec6704e8b84e6d0f6547602f1f639bb..f8d818abf98caf4dc8275a65e18695c41c639537 100644 (file)
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
 config REMOTEPROC
        tristate
        depends on EXPERIMENTAL
+       select FW_CONFIG
 
 config OMAP_REMOTEPROC
        tristate "OMAP remoteproc support"
+       depends on EXPERIMENTAL
        depends on ARCH_OMAP4
        depends on OMAP_IOMMU
        select REMOTEPROC
index 69425c4e86f30323e765d59151c0ab6d7517355c..de138e30d3e6ca0417ef444732721c14290f959c 100644 (file)
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
 
        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (ret) {
-               dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+               dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
                return ret;
        }
 
index 8ea7bccc71007fd94132cf7c65da5f5ed5095a12..66324ee4678f45984348519813119dc41d71ade9 100644 (file)
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
                }
 
                if (offset + filesz > len) {
-                       dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+                       dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
                                        offset + filesz, len);
                        ret = -EINVAL;
                        break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
                unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
                if (unmapped != entry->len) {
                        /* nothing much to do besides complaining */
-                       dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+                       dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
                                                                unmapped);
                }
 
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        ehdr = (struct elf32_hdr *)fw->data;
 
-       dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+       dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
 
        /*
         * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        /* look for the resource table */
        table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
-       if (!table)
+       if (!table) {
+               ret = -EINVAL;
                goto clean_up;
+       }
 
        /* handle fw resources which are required to boot rproc */
        ret = rproc_handle_boot_rsc(rproc, table, tablesz);
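Several of the hunks above only touch format strings: the firmware length, fw->size and the iommu_unmap() return value are all size_t, so they need the z length modifier rather than a plain %d or %x (which breaks on 64-bit builds). A minimal demonstration:

#include <stdio.h>

int main(void)
{
        size_t len = 123456;

        /* %zu / %zx match size_t on both 32-bit and 64-bit targets */
        printf("size %zu bytes, hex 0x%zx\n", len, len);
        return 0;
}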
index 75506ec2840e2ec4d1e331f209377d874f604386..f56c8ba3a861cda16870e7456391fe564ec741c6 100644 (file)
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
                                        rpdev->id.name);
 }
 
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+       struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+                                                 refcount);
+       /*
+        * At this point no one holds a reference to ept anymore,
+        * so we can directly free it
+        */
+       kfree(ept);
+}
+
 /* for more info, see below documentation of rpmsg_create_ept() */
 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                return NULL;
        }
 
+       kref_init(&ept->refcount);
+       mutex_init(&ept->cb_lock);
+
        ept->rpdev = rpdev;
        ept->cb = cb;
        ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
        idr_remove(&vrp->endpoints, request);
 free_ept:
        mutex_unlock(&vrp->endpoints_lock);
-       kfree(ept);
+       kref_put(&ept->refcount, __ept_release);
        return NULL;
 }
 
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
 static void
 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
 {
+       /* make sure new inbound messages can't find this ept anymore */
        mutex_lock(&vrp->endpoints_lock);
        idr_remove(&vrp->endpoints, ept->addr);
        mutex_unlock(&vrp->endpoints_lock);
 
-       kfree(ept);
+       /* make sure in-flight inbound messages won't invoke cb anymore */
+       mutex_lock(&ept->cb_lock);
+       ept->cb = NULL;
+       mutex_unlock(&ept->cb_lock);
+
+       kref_put(&ept->refcount, __ept_release);
 }
 
 /**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
 
        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);
+
        ept = idr_find(&vrp->endpoints, msg->dst);
+
+       /* let's make sure no one deallocates ept while we use it */
+       if (ept)
+               kref_get(&ept->refcount);
+
        mutex_unlock(&vrp->endpoints_lock);
 
-       if (ept && ept->cb)
-               ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
-       else
+       if (ept) {
+               /* make sure ept->cb doesn't go away while we use it */
+               mutex_lock(&ept->cb_lock);
+
+               if (ept->cb)
+                       ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+                               msg->src);
+
+               mutex_unlock(&ept->cb_lock);
+
+               /* farewell, ept, we don't need you anymore */
+               kref_put(&ept->refcount, __ept_release);
+       } else
                dev_warn(dev, "msg received with no recepient\n");
 
        /* publish the real size of the buffer */
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
 
        return ret;
 }
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
 
 static void __exit rpmsg_fini(void)
 {
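The rpmsg changes above close two races: the receive path could use an endpoint while another thread freed it, and an in-flight message could still invoke the callback after the endpoint was torn down. The cure is a reference count on the endpoint plus a lock around the callback pointer. Below is a compact user-space analogue of that pattern, built on pthreads with invented names — a sketch of the idea, not the kernel implementation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
        int refcount;                   /* protected by lookup_lock */
        pthread_mutex_t cb_lock;        /* protects cb against teardown */
        void (*cb)(void *priv);
        void *priv;
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;

static void ept_put(struct endpoint *ept)
{
        int last;

        pthread_mutex_lock(&lookup_lock);
        last = (--ept->refcount == 0);
        pthread_mutex_unlock(&lookup_lock);
        if (last)
                free(ept);
}

/* receive path: pin the endpoint, then invoke cb only under cb_lock */
static void deliver(struct endpoint *ept)
{
        pthread_mutex_lock(&lookup_lock);
        ept->refcount++;
        pthread_mutex_unlock(&lookup_lock);

        pthread_mutex_lock(&ept->cb_lock);
        if (ept->cb)
                ept->cb(ept->priv);
        pthread_mutex_unlock(&ept->cb_lock);

        ept_put(ept);
}

/* teardown: disarm the callback first, then drop the creation reference */
static void destroy(struct endpoint *ept)
{
        pthread_mutex_lock(&ept->cb_lock);
        ept->cb = NULL;
        pthread_mutex_unlock(&ept->cb_lock);
        ept_put(ept);
}

static void hello(void *priv)
{
        printf("%s\n", (const char *)priv);
}

int main(void)
{
        struct endpoint *ept = calloc(1, sizeof(*ept));

        if (!ept)
                return 1;
        ept->refcount = 1;
        pthread_mutex_init(&ept->cb_lock, NULL);
        ept->cb = hello;
        ept->priv = "ping";

        deliver(ept);   /* safe even if destroy() races on another thread */
        destroy(ept);
        return 0;
}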
index 4bcf9ca2818ae740eadd016df48cc0461da23f6e..370889d0489bf9e4f196abcbbcd6f11b25e6f5a8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 
 #define AB8500_RTC_SOFF_STAT_REG       0x00
 #define AB8500_RTC_CC_CONF_REG         0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
        }
 
        err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
-               IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+               IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
        if (err < 0) {
                rtc_device_unregister(rtc);
                return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
-
        err = ab8500_sysfs_rtc_register(&pdev->dev);
        if (err) {
                dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_rtc_match[] = {
+       { .compatible = "stericsson,ab8500-rtc", },
+       {}
+};
+
 static struct platform_driver ab8500_rtc_driver = {
        .driver = {
                .name = "ab8500-rtc",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_rtc_match,
        },
        .probe  = ab8500_rtc_probe,
        .remove = __devexit_p(ab8500_rtc_remove),
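The hunk above adds a device-tree match table so the platform driver can be bound from a "stericsson,ab8500-rtc" node as well as by its platform name. A minimal kernel-style sketch of the same wiring for a hypothetical driver — the compatible string and all names here are invented for illustration:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "bound\n");
        return 0;
}

static const struct of_device_id foo_match[] = {
        { .compatible = "vendor,foo" },
        { }
};
MODULE_DEVICE_TABLE(of, foo_match);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                .of_match_table = foo_match,
        },
        .probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");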
index 7d5f56edb8efc30e8beb70cbad177a0cca32dc35..4267789ca9959413e90df5ea053154e07481d3ce 100644 (file)
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
 
 static u32 rtc_handler(void *context)
 {
+       struct device *dev = context;
+
+       pm_wakeup_event(dev, 0);
        acpi_clear_event(ACPI_EVENT_RTC);
        acpi_disable_event(ACPI_EVENT_RTC, 0);
        return ACPI_INTERRUPT_HANDLED;
 }
 
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
 {
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
        /*
         * After the RTC handler is installed, the Fixed_RTC event should
         * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
        if (acpi_disabled)
                return;
 
-       rtc_wake_setup();
+       rtc_wake_setup(dev);
        acpi_rtc_info.wake_on = rtc_wake_on;
        acpi_rtc_info.wake_off = rtc_wake_off;
 
index 5e1d64ee52289b9e7a47990f8c7de0b41c7fccaf..e3e50d69baf85e75966bdea6d7f7b95ae6d65af0 100644 (file)
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        struct platform_device *pdev = dev_id;
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
+       unsigned long flags;
        u32 status;
        u32 events = 0;
 
-       spin_lock_irq(&pdata->rtc->irq_lock);
+       spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
        status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
                events |= (RTC_PF | RTC_IRQF);
 
        rtc_update_irq(pdata->rtc, 1, events);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
+       spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
        return IRQ_HANDLED;
 }
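The change above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave/irqrestore pair: unconditionally re-enabling interrupts on unlock is wrong when the caller already runs with interrupts disabled (as a hardirq handler does), so the handler saves and restores the previous state instead. A kernel-style fragment of the pattern, with an illustrative lock and counter (registration via request_irq() omitted):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_events;

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        unsigned long flags;

        /* save the current interrupt state and restore it afterwards */
        spin_lock_irqsave(&demo_lock, flags);
        demo_events++;
        spin_unlock_irqrestore(&demo_lock, flags);

        return IRQ_HANDLED;
}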
index 1f76320e545b1cf15d563cc8996b8eed7122f14a..e2785479113ca06648949d6c6a78adee92c78367 100644 (file)
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
        clk_disable(config->clk);
        clk_put(config->clk);
        iounmap(config->ioaddr);
-       kfree(config);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        platform_set_drvdata(pdev, NULL);
        rtc_device_unregister(config->rtc);
+       kfree(config);
 
        return 0;
 }
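The reorder above is a use-after-free fix: config was freed and then dereferenced a few lines later to unregister the RTC device. Moving the kfree() to the end keeps every use before the free. A tiny user-space illustration of the ordering rule, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct config { char name[16]; };

int main(void)
{
        struct config *config = malloc(sizeof(*config));

        if (!config)
                return 1;
        strcpy(config->name, "rtc0");

        /* wrong order: free(config); printf("%s\n", config->name);  -- use after free */

        printf("unregister %s\n", config->name);       /* last use ...   */
        free(config);                                   /* ... then free  */
        return 0;
}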
index 258abeabf6246a1ed7e4db116713bb3128009fa0..c5d06fe83bba6274f2e6b944a43489395e04e01c 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        }
 
        ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
-                                  IRQF_TRIGGER_RISING,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                   dev_name(&rtc->dev), rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "IRQ is not free.\n");
index 33a6743ddc558c11e9b07e09e230efb40e80da28..c05da00583f06c94f5fbb12bb110c7f5a60875c8 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef DASD_INT_H
 #define DASD_INT_H
 
-#ifdef __KERNEL__
-
 /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
 #define dasd_eer_enabled(d)    (0)
 #endif /* CONFIG_DASD_ERR */
 
-#endif                         /* __KERNEL__ */
-
 #endif                         /* DASD_H */
index 69e6c50d4cfb25c341d0e115d2ad2d4a3034520a..50f7115990fffc3954cc99f24bdd06d1caa4fa7a 100644 (file)
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
        sccb.evbuf.event_qual = EQ_STORE_DATA;
        sccb.evbuf.data_id = DI_FCP_DUMP;
        sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        sccb.evbuf.asa_size = ASA_SIZE_64;
 #else
        sccb.evbuf.asa_size = ASA_SIZE_32;
index 532d212b6b2c7b633a9b4338da14c78439a1d3db..393e7ce8e95a15ef5907c820dcb2ff9bd3542ac8 100644 (file)
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-                       memcpy(&resp->ending_fis[0], r+16, 24);
+                       memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }
index 01bb04cd9e7516a567b714fcd1f8a1896096aba9..2a096795b9aa86768ef18561872d948800eb6402 100644 (file)
@@ -571,13 +571,12 @@ free_cmd:
 static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
                               int iscsi_cmd, int size)
 {
-       cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
-                                      &cmd->dma);
+       cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
        if (!cmd->va) {
                SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
                return -ENOMEM;
        }
-       memset(cmd->va, 0, sizeof(size));
+       memset(cmd->va, 0, size);
        cmd->size = size;
        be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
        return 0;
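The hunk above fixes a classic mistake: sizeof(size) is the size of the integer variable (4 or 8 bytes), not the number of bytes it holds, so the buffer was both allocated and cleared with the wrong length. A short demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        int size = 4096;
        char buf[4096];

        printf("sizeof(size) = %zu, size = %d\n", sizeof(size), size);

        memset(buf, 0, sizeof(size));   /* clears only 4 (or 8) bytes */
        memset(buf, 0, size);           /* clears the intended 4096 bytes */
        return 0;
}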
index 8b6c6bf7837e7e09a0d6fec5d88733b3477a0d77..b83927440171810b4d6442c992c2c01a5db4558d 100644 (file)
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
                vshost = vport->drv_port.im_port->shost;
                fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
                fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+               fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+               memset(fc_host_supported_fc4s(vshost), 0,
+                       sizeof(fc_host_supported_fc4s(vshost)));
+
+               /* For FCP type 0x08 */
+               if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+                       fc_host_supported_fc4s(vshost)[2] = 1;
+
+               /* For fibre channel services type 0x20 */
+               fc_host_supported_fc4s(vshost)[7] = 1;
+
+               fc_host_supported_speeds(vshost) =
+                               bfad_im_supported_speeds(&bfad->bfa);
+               fc_host_maxframe_size(vshost) =
+                               bfa_fcport_get_maxfrsize(&bfad->bfa);
+
                fc_vport->dd_data = vport;
                vport->drv_port.im_port->fc_vport = fc_vport;
        } else if (rc == BFA_STATUS_INVALID_WWN)
index 3153923f5b6027f1c16d806e14092e1df5356218..1ac09afe35ee17a6a23916e41ede98cd3f63a2ad 100644 (file)
@@ -987,7 +987,7 @@ done:
        return 0;
 }
 
-static u32
+u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
        struct bfa_ioc_attr_s *ioc_attr;
index 0814367ef101a1c075c0cfd4f5a52bc34dea920d..f6c1023e502a13cd04f19551d4893ec5f9014129 100644 (file)
@@ -37,6 +37,7 @@ int  bfad_im_scsi_host_alloc(struct bfad_s *bfad,
                struct bfad_im_port_s *im_port, struct device *dev);
 void bfad_im_scsi_host_free(struct bfad_s *bfad,
                                struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
 
 #define MAX_FCP_TARGET 1024
 #define MAX_FCP_LUN 16384
index a4953ef9e53accf67b5933e3bc1259511f31fa22..0578fa0dc14b73e6c26113d163759e1fe33b735d 100644 (file)
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.10"
+#define BNX2FC_VERSION         "1.0.11"
 
 #define PFX                    "bnx2fc: "
 
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
        struct packet_type fip_packet_type;
        struct workqueue_struct *timer_work_queue;
        struct kref kref;
-       struct fcoe_ctlr ctlr;
        u8 vlan_enabled;
        int vlan_id;
        bool enabled;
 };
 
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x)                    \
+       ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x)                                      \
+       ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
 
 struct bnx2fc_lport {
        struct list_head list;
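The new bnx2fc_from_ctlr()/bnx2fc_to_ctlr() macros above assume the interface structure is laid out immediately after its fcoe_ctlr in a single allocation (a later hunk sizes the fcoe_ctlr_device_add() call with sizeof(*interface) + sizeof(struct fcoe_ctlr) to set that up), so either pointer can be recovered from the other by pointer arithmetic instead of embedding the ctlr in the interface. A user-space sketch of this "private data follows the public struct" layout, with invented types mirroring the macros:

#include <stdio.h>
#include <stdlib.h>

struct ctlr { int id; };                /* public part */
struct iface { int vlan_id; };          /* private part, placed right behind it */

#define iface_from_ctlr(c)      ((struct iface *)(((struct ctlr *)(c)) + 1))
#define ctlr_from_iface(i)      (((struct ctlr *)(void *)(i)) - 1)

int main(void)
{
        /* single allocation: ctlr first, private iface directly behind it */
        struct ctlr *c = calloc(1, sizeof(struct ctlr) + sizeof(struct iface));
        struct iface *i;

        if (!c)
                return 1;
        c->id = 7;
        i = iface_from_ctlr(c);
        i->vlan_id = 100;

        printf("ctlr %d -> vlan %d\n", ctlr_from_iface(i)->id, i->vlan_id);
        free(c);
        return 0;
}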
index ce0ce3e32f336aaf711d6129a781c0aa035120fa..bdbbb13b8534c2318464b1b9a803c554af2b17ac 100644 (file)
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        u8 *mac;
-       struct fc_frame_header *fh;
        u8 op;
 
        if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 
        mac = fr_cb(fp)->granted_mac;
        if (is_zero_ether_addr(mac)) {
-               fh = fc_frame_header_get(fp);
-               if (fh->fh_type != FC_TYPE_ELS) {
-                       printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
-                               "fh_type != FC_TYPE_ELS\n");
-                       fc_frame_free(fp);
-                       return;
-               }
                op = fc_frame_payload_op(fp);
                if (lport->vport) {
                        if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
                                return;
                        }
                }
-               if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-                       fc_frame_free(fp);
-                       return;
-               }
+               fcoe_ctlr_recv_flogi(fip, lport, fp);
        }
-       fip->update_mac(lport, mac);
+       if (!is_zero_ether_addr(mac))
+               fip->update_mac(lport, mac);
 done:
        fc_lport_flogi_resp(seq, fp, lport);
 }
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
-       struct fcoe_ctlr *fip = &interface->ctlr;
+       struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
        struct fc_frame_header *fh = fc_frame_header_get(fp);
 
        switch (op) {
index c1c6a92a0b989737c9f8a15b81e8e24cd86b758e..f52f668fd247b5601e27ac9572c99b6832302ca8 100644 (file)
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Jan 22, 2011"
+#define DRV_MODULE_RELDATE     "Apr 24, 2012"
 
 
 static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
 static struct scsi_host_template bnx2fc_shost_template;
 static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
 static struct fc_function_template bnx2fc_vport_xport_function;
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
        __fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = bnx2fc_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev =
+               fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+       fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
 static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 {
        struct fcoe_percpu_s *bg;
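bnx2fc_ctlr_get_lesb() in the hunk above copies the link error status block counters out for fcoe_sysfs; the ntohl() on each field converts them from big-endian wire order to host order. A small user-space reminder of that conversion:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        unsigned int wire = htonl(42);  /* value as it appears on the wire */

        printf("raw 0x%08x -> host %u\n", wire, ntohl(wire));
        return 0;
}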
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        struct sk_buff          *skb;
        struct fc_frame_header  *fh;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr        *ctlr;
        struct bnx2fc_hba *hba;
        struct fcoe_port        *port;
        struct fcoe_hdr         *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 
        port = (struct fcoe_port *)lport_priv(lport);
        interface = port->priv;
+       ctlr = bnx2fc_to_ctlr(interface);
        hba = interface->hba;
 
        fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        }
 
        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
-               if (!interface->ctlr.sel_fcf) {
+               if (!ctlr->sel_fcf) {
                        BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+               if (fcoe_ctlr_els_send(ctlr, lport, skb))
                        return 0;
        }
 
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
-       if (interface->ctlr.map_dest)
+       if (ctlr->map_dest)
                fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
        else
                /* insert GW address */
-               memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+               memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
 
-       if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-               memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+       if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+               memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
        else
                memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct fc_lport *lport;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fc_frame_header *fh;
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 
        interface = container_of(ptype, struct bnx2fc_interface,
                                 fcoe_packet_type);
-       lport = interface->ctlr.lp;
+       ctlr = bnx2fc_to_ctlr(interface);
+       lport = ctlr->lp;
 
        if (unlikely(lport == NULL)) {
                printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 {
        struct bnx2fc_hba *hba;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        u64 wwnn, wwpn;
 
        port = lport_priv(lport);
        interface = port->priv;
+       ctlr = bnx2fc_to_ctlr(interface);
        hba = interface->hba;
 
        /* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-                       wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                       wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 1, 0);
                BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
                fc_set_wwnn(lport, wwnn);
 
                if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-                       wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                       wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 2, 0);
 
                BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
        struct fc_lport *lport;
        struct fc_lport *vport;
        struct bnx2fc_interface *interface, *tmp;
+       struct fcoe_ctlr *ctlr;
        int wait_for_upload = 0;
        u32 link_possible = 1;
 
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                if (interface->hba != hba)
                        continue;
 
-               lport = interface->ctlr.lp;
+               ctlr = bnx2fc_to_ctlr(interface);
+               lport = ctlr->lp;
                BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
                                interface->netdev->name, event);
 
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                         * on a stale vlan
                         */
                        if (interface->enabled)
-                               fcoe_ctlr_link_up(&interface->ctlr);
-               } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+                               fcoe_ctlr_link_up(ctlr);
+               } else if (fcoe_ctlr_link_down(ctlr)) {
                        mutex_lock(&lport->lp_mutex);
                        list_for_each_entry(vport, &lport->vports, list)
                                fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
                           struct net_device *orig_dev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        interface = container_of(ptype, struct bnx2fc_interface,
                                 fip_packet_type);
-       fcoe_ctlr_recv(&interface->ctlr, skb);
+       ctlr = bnx2fc_to_ctlr(interface);
+       fcoe_ctlr_recv(ctlr, skb);
        return 0;
 }
 
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 {
        struct net_device *netdev = interface->netdev;
        struct net_device *physdev = interface->hba->phys_dev;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct netdev_hw_addr *ha;
        int sel_san_mac = 0;
 
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 
                if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
                    (is_valid_ether_addr(ha->addr))) {
-                       memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+                       memcpy(ctlr->ctl_src_addr, ha->addr,
                               ETH_ALEN);
                        sel_san_mac = 1;
                        BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
 
 static void bnx2fc_interface_release(struct kref *kref)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct net_device *netdev;
 
        interface = container_of(kref, struct bnx2fc_interface, kref);
        BNX2FC_MISC_DBG("Interface is being released\n");
 
+       ctlr = bnx2fc_to_ctlr(interface);
+       ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
        netdev = interface->netdev;
 
        /* tear-down FIP controller */
        if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
-               fcoe_ctlr_destroy(&interface->ctlr);
+               fcoe_ctlr_destroy(ctlr);
 
-       kfree(interface);
+       fcoe_ctlr_device_delete(ctlr_dev);
 
        dev_put(netdev);
        module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
                                      struct net_device *netdev,
                                      enum fip_state fip_mode)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
+       int size;
        int rc = 0;
 
-       interface = kzalloc(sizeof(*interface), GFP_KERNEL);
-       if (!interface) {
+       size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+       ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+                                        size);
+       if (!ctlr_dev) {
                printk(KERN_ERR PFX "Unable to allocate interface structure\n");
                return NULL;
        }
+       ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       interface = fcoe_ctlr_priv(ctlr);
        dev_hold(netdev);
        kref_init(&interface->kref);
        interface->hba = hba;
        interface->netdev = netdev;
 
        /* Initialize FIP */
-       fcoe_ctlr_init(&interface->ctlr, fip_mode);
-       interface->ctlr.send = bnx2fc_fip_send;
-       interface->ctlr.update_mac = bnx2fc_update_src_mac;
-       interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+       fcoe_ctlr_init(ctlr, fip_mode);
+       ctlr->send = bnx2fc_fip_send;
+       ctlr->update_mac = bnx2fc_update_src_mac;
+       ctlr->get_src_addr = bnx2fc_get_src_mac;
        set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
 
        rc = bnx2fc_interface_setup(interface);
        if (!rc)
                return interface;
 
-       fcoe_ctlr_destroy(&interface->ctlr);
+       fcoe_ctlr_destroy(ctlr);
        dev_put(netdev);
-       kfree(interface);
+       fcoe_ctlr_device_delete(ctlr_dev);
        return NULL;
 }
 
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
                                  struct device *parent, int npiv)
 {
+       struct fcoe_ctlr        *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport         *lport, *n_port;
        struct fcoe_port        *port;
        struct Scsi_Host        *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 
        blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
        if (!blport) {
-               BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+               BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
                return NULL;
        }
 
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
 
 static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
 {
-       struct fc_lport *lport = interface->ctlr.lp;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+       struct fc_lport *lport = ctlr->lp;
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_hba *hba = interface->hba;
 
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
 
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 {
-       struct fc_lport *lport = interface->ctlr.lp;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+       struct fc_lport *lport = ctlr->lp;
        struct fcoe_port *port = lport_priv(lport);
 
        bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface = NULL;
        struct workqueue_struct *timer_work_queue;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
                goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
 {
        struct bnx2fc_hba *hba = handle;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fc_lport *lport;
 
        mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
 
        list_for_each_entry(interface, &if_list, list) {
                if (interface->hba == hba) {
-                       lport = interface->ctlr.lp;
+                       ctlr = bnx2fc_to_ctlr(interface);
+                       lport = ctlr->lp;
                        /* Kick off Fabric discovery*/
                        printk(KERN_ERR PFX "ulp_init: start discovery\n");
                        lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
 
 static void bnx2fc_stop(struct bnx2fc_interface *interface)
 {
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport *lport;
        struct fc_lport *vport;
 
        if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
                return;
 
-       lport = interface->ctlr.lp;
+       lport = ctlr->lp;
        bnx2fc_port_shutdown(lport);
 
        mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
                                        FC_PORTTYPE_UNKNOWN;
        mutex_unlock(&lport->lp_mutex);
        fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-       fcoe_ctlr_link_down(&interface->ctlr);
+       fcoe_ctlr_link_down(ctlr);
        fcoe_clean_pending_queue(lport);
 }
 
@@ -1804,6 +1871,7 @@ exit:
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
 {
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport *lport;
        int wait_cnt = 0;
 
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
                return;
        }
 
-       lport = interface->ctlr.lp;
+       lport = ctlr->lp;
        BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
 
        if (!bnx2fc_link_ok(lport) && interface->enabled) {
                BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
-               fcoe_ctlr_link_up(&interface->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
        }
 
        /* wait for the FCF to be selected before issuing FLOGI */
-       while (!interface->ctlr.sel_fcf) {
+       while (!ctlr->sel_fcf) {
                msleep(250);
                /* give up after 3 secs */
                if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
 static int bnx2fc_disable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
        } else {
                interface->enabled = false;
-               fcoe_ctlr_link_down(&interface->ctlr);
-               fcoe_clean_pending_queue(interface->ctlr.lp);
+               fcoe_ctlr_link_down(ctlr);
+               fcoe_clean_pending_queue(ctlr->lp);
        }
 
        mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
 static int bnx2fc_enable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-       } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
-               fcoe_ctlr_link_up(&interface->ctlr);
+       } else if (!bnx2fc_link_ok(ctlr->lp)) {
+               fcoe_ctlr_link_up(ctlr);
                interface->enabled = true;
        }
 
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
  */
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 {
+       struct fcoe_ctlr *ctlr;
        struct bnx2fc_interface *interface;
        struct bnx2fc_hba *hba;
        struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
                goto ifput_err;
        }
 
+       ctlr = bnx2fc_to_ctlr(interface);
        interface->vlan_id = vlan_id;
        interface->vlan_enabled = 1;
 
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
        lport->boot_time = jiffies;
 
        /* Make this master N_port */
-       interface->ctlr.lp = lport;
+       ctlr->lp = lport;
 
        if (!bnx2fc_link_ok(lport)) {
-               fcoe_ctlr_link_up(&interface->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
        }
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
 module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+       .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+       .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
 static struct fc_function_template bnx2fc_transport_function = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
index afd570962b8c288636fad351671a376ad03992e7..2ca6bfe4ce5e38fe3a7be6b75cd39191c4a6ca2d 100644 (file)
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 {
        struct fc_lport *lport = port->lport;
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct kwqe *kwqe_arr[4];
        struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
        ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
        ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
        ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
-       ofld_req4.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
+       ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
                                                        /* fcf mac */
-       ofld_req4.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       ofld_req4.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       ofld_req4.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       ofld_req4.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       ofld_req4.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+       ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 {
        struct kwqe *kwqe_arr[2];
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable enbl_req;
        struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
        enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 
-       enbl_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-       enbl_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       enbl_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       enbl_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       enbl_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       enbl_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+       enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+       enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        port_id = fc_host_port_id(lport->host);
        if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
                                    struct bnx2fc_rport *tgt)
 {
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable disable_req;
        struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
        disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
        disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-       disable_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-       disable_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       disable_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       disable_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       disable_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       disable_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+       disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+       disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        port_id = tgt->sid;
        disable_req.s_id[0] = (port_id & 0x000000FF);
index e897ce975bb8b15f7ae74f56d0028f1b0f6adb05..4f7453b9e41e2486b662d7fc3d8f55b1762fe154 100644 (file)
@@ -810,8 +810,22 @@ retry_tmf:
        spin_lock_bh(&tgt->tgt_lock);
 
        io_req->wait_for_comp = 0;
-       if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+       if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
                set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+               if (io_req->on_tmf_queue) {
+                       list_del_init(&io_req->link);
+                       io_req->on_tmf_queue = 0;
+               }
+               io_req->wait_for_comp = 1;
+               bnx2fc_initiate_cleanup(io_req);
+               spin_unlock_bh(&tgt->tgt_lock);
+               rc = wait_for_completion_timeout(&io_req->tm_done,
+                                                BNX2FC_FW_TIMEOUT);
+               spin_lock_bh(&tgt->tgt_lock);
+               io_req->wait_for_comp = 0;
+               if (!rc)
+                       kref_put(&io_req->refcount, bnx2fc_cmd_release);
+       }
 
        spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
        return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+       struct bnx2fc_rport *tgt = io_req->tgt;
+       struct fc_rport_priv *rdata = tgt->rdata;
+       int logo_issued;
+       int rc = SUCCESS;
+       int wait_cnt = 0;
+
+       BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+                     tgt->flags);
+       logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+                                      &tgt->flags);
+       io_req->wait_for_comp = 1;
+       bnx2fc_initiate_cleanup(io_req);
+
+       spin_unlock_bh(&tgt->tgt_lock);
+
+       wait_for_completion(&io_req->tm_done);
+
+       io_req->wait_for_comp = 0;
+       /*
+        * release the reference taken in eh_abort to allow the
+        * target to re-login after flushing IOs
+        */
+        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+       if (!logo_issued) {
+               clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+               mutex_lock(&lport->disc.disc_mutex);
+               lport->tt.rport_logoff(rdata);
+               mutex_unlock(&lport->disc.disc_mutex);
+               do {
+                       msleep(BNX2FC_RELOGIN_WAIT_TIME);
+                       if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+                               rc = FAILED;
+                               break;
+                       }
+               } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+       }
+       spin_lock_bh(&tgt->tgt_lock);
+       return rc;
+}
 /**
  * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
  *                     SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct bnx2fc_cmd *io_req;
        struct fc_lport *lport;
-       struct fc_rport_priv *rdata;
        struct bnx2fc_rport *tgt;
-       int logo_issued;
-       int wait_cnt = 0;
        int rc = FAILED;
 
 
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
        init_completion(&io_req->tm_done);
-       io_req->wait_for_comp = 1;
 
-       if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
-               /* Cancel the current timer running on this io_req */
-               if (cancel_delayed_work(&io_req->timeout_work))
-                       kref_put(&io_req->refcount,
-                                bnx2fc_cmd_release); /* drop timer hold */
-               set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-               rc = bnx2fc_initiate_abts(io_req);
-       } else {
+       if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
                printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
                                "already in abts processing\n", io_req->xid);
                if (cancel_delayed_work(&io_req->timeout_work))
                        kref_put(&io_req->refcount,
                                 bnx2fc_cmd_release); /* drop timer hold */
-               bnx2fc_initiate_cleanup(io_req);
+               rc = bnx2fc_expl_logo(lport, io_req);
+               goto out;
+       }
 
+       /* Cancel the current timer running on this io_req */
+       if (cancel_delayed_work(&io_req->timeout_work))
+               kref_put(&io_req->refcount,
+                        bnx2fc_cmd_release); /* drop timer hold */
+       set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+       io_req->wait_for_comp = 1;
+       rc = bnx2fc_initiate_abts(io_req);
+       if (rc == FAILED) {
+               bnx2fc_initiate_cleanup(io_req);
                spin_unlock_bh(&tgt->tgt_lock);
-
                wait_for_completion(&io_req->tm_done);
-
                spin_lock_bh(&tgt->tgt_lock);
                io_req->wait_for_comp = 0;
-               rdata = io_req->tgt->rdata;
-               logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
-                                              &tgt->flags);
-               kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
-
-               if (!logo_issued) {
-                       BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
-                                     tgt->flags);
-                       mutex_lock(&lport->disc.disc_mutex);
-                       lport->tt.rport_logoff(rdata);
-                       mutex_unlock(&lport->disc.disc_mutex);
-                       do {
-                               msleep(BNX2FC_RELOGIN_WAIT_TIME);
-                               /*
-                                * If session not recovered, let SCSI-ml
-                                * escalate error recovery.
-                                */
-                               if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
-                                       return FAILED;
-                       } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
-                                          &tgt->flags));
-               }
-               return SUCCESS;
-       }
-       if (rc == FAILED) {
-               kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
-               return rc;
+               goto done;
        }
        spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
-               rc = FAILED;
+               rc = bnx2fc_expl_logo(lport, io_req);
+               goto out;
        } else {
                /*
                 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                bnx2fc_scsi_done(io_req, DID_ABORT);
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
        }
-
+done:
        /* release the reference taken in eh_abort */
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
        spin_unlock_bh(&tgt->tgt_lock);
        return rc;
 }
index c1800b5312708a914cf424319dd7376fbb008186..082a25c3117e58cf961c383803e743ac92b7da24 100644 (file)
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
                BUG_ON(rc);
        }
 
+       list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+               i++;
+               io_req = (struct bnx2fc_cmd *)list;
+               list_del_init(&io_req->link);
+               io_req->on_tmf_queue = 0;
+               BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+               if (io_req->wait_for_comp)
+                       complete(&io_req->tm_done);
+       }
+
        list_for_each_safe(list, tmp, &tgt->els_queue) {
                i++;
                io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
                BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
 
-               if (cancel_delayed_work(&io_req->timeout_work))
+               if (cancel_delayed_work(&io_req->timeout_work)) {
+                       if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+                                               &io_req->req_flags)) {
+                               /* Handle eh_abort timeout */
+                               BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+                                             "in retire_q\n");
+                               if (io_req->wait_for_comp)
+                                       complete(&io_req->tm_done);
+                       }
                        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+               }
 
                clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
        }
index 0c53c28dc3d379a5a09960e2e9ab8fae53cb8526..7e77cf62029190562d718fe7c365cb087c3e573e 100644 (file)
@@ -350,6 +350,7 @@ struct bnx2i_hba {
        struct pci_dev *pcidev;
        struct net_device *netdev;
        void __iomem *regview;
+       resource_size_t reg_base;
 
        u32 age;
        unsigned long cnic_dev_type;
index ece47e502282d389b91e5d170ca023688a22deb6..86a12b48e477e804e12765e7d535d7e70f44405e 100644 (file)
@@ -2724,7 +2724,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                goto arm_cq;
        }
 
-       reg_base = ep->hba->netdev->base_addr;
        if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
            (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
                config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2739,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
index f8d516b531617426cc05d626188075facf4e5a6a..621538b8b5445fd3e3e2685213ae7118f7229fd0 100644 (file)
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);
 
+       hba->reg_base = pci_resource_start(hba->pcidev, 0);
        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr,
-                                              BNX2_MQ_CONFIG2);
+               hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               hba->regview = pci_iomap(hba->pcidev, 0, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }
@@ -884,7 +884,7 @@ cid_que_err:
        bnx2i_free_mp_bdt(hba);
 mp_bdt_mem_err:
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
 ioreg_map_err:
@@ -910,7 +910,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
        pci_dev_put(hba->pcidev);
 
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
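
The bnx2i changes above stop reading netdev->base_addr and instead record the PCI BAR 0 start address and map the register window through the PCI core. In isolation the pattern looks roughly like this (a sketch; the struct and function names are illustrative, not the driver's):

#include <linux/pci.h>

struct example_hba {
	struct pci_dev *pcidev;
	void __iomem *regview;
	resource_size_t reg_base;
};

static int example_map_regs(struct example_hba *hba, unsigned long len)
{
	/* remember the raw BAR 0 address for later offset-based ioremaps */
	hba->reg_base = pci_resource_start(hba->pcidev, 0);

	/* map the register window via the PCI core instead of ioremap_nocache() */
	hba->regview = pci_iomap(hba->pcidev, 0, len);
	if (!hba->regview)
		return -ENOMEM;
	return 0;
}

static void example_unmap_regs(struct example_hba *hba)
{
	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
	}
}
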
index f6d37d0271f73bd08538bc3028e388e0e569b9db..aed0f5db36684c67c8a01817cad83fa553f48cb2 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
 
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
index 76e3d0b5bfa676212156d800dd295b46cddfc1de..fe30b1b65e1d3ddc879823a79404efdcc60d4982 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <scsi/fc/fc_encaps.h>
 #include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include <scsi/libfc.h>
 #include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+       .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+       .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
                                struct net_device *netdev)
 {
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        struct netdev_hw_addr *ha;
        struct net_device *real_dev;
        u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
                                                    enum fip_state fip_mode)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
+       int size;
        int err;
 
        if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
                goto out;
        }
 
-       fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
-       if (!fcoe) {
-               FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+       size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+       ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+                                       size);
+       if (!ctlr_dev) {
+               FCOE_DBG("Failed to add fcoe_ctlr_device\n");
                fcoe = ERR_PTR(-ENOMEM);
                goto out_putmod;
        }
 
+       ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       fcoe = fcoe_ctlr_priv(ctlr);
+
        dev_hold(netdev);
 
        /*
         * Initialize FIP.
         */
-       fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
-       fcoe->ctlr.send = fcoe_fip_send;
-       fcoe->ctlr.update_mac = fcoe_update_src_mac;
-       fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+       fcoe_ctlr_init(ctlr, fip_mode);
+       ctlr->send = fcoe_fip_send;
+       ctlr->update_mac = fcoe_update_src_mac;
+       ctlr->get_src_addr = fcoe_get_src_mac;
 
        err = fcoe_interface_setup(fcoe, netdev);
        if (err) {
-               fcoe_ctlr_destroy(&fcoe->ctlr);
-               kfree(fcoe);
+               fcoe_ctlr_destroy(ctlr);
+               fcoe_ctlr_device_delete(ctlr_dev);
                dev_put(netdev);
                fcoe = ERR_PTR(err);
                goto out_putmod;
@@ -419,7 +443,7 @@ out:
 static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
 
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
        rtnl_lock();
        if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        /* Release the self-reference taken during fcoe_interface_create() */
        /* tear-down the FCoE controller */
        fcoe_ctlr_destroy(fip);
-       scsi_host_put(fcoe->ctlr.lp->host);
-       kfree(fcoe);
+       scsi_host_put(fip->lp->host);
+       fcoe_ctlr_device_delete(ctlr_dev);
        dev_put(netdev);
        module_put(THIS_MODULE);
 }
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
                         struct net_device *orig_dev)
 {
        struct fcoe_interface *fcoe;
+       struct fcoe_ctlr *ctlr;
 
        fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
-       fcoe_ctlr_recv(&fcoe->ctlr, skb);
+       ctlr = fcoe_to_ctlr(fcoe);
+       fcoe_ctlr_recv(ctlr, skb);
        return 0;
 }
 
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
        u32 mfs;
        u64 wwnn, wwpn;
        struct fcoe_interface *fcoe;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
 
        /* Setup lport private data to point to fcoe softc */
        port = lport_priv(lport);
        fcoe = port->priv;
+       ctlr = fcoe_to_ctlr(fcoe);
 
        /*
         * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-                       wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+                       wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
                fc_set_wwnn(lport, wwnn);
                if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-                       wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+                       wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 2, 0);
                fc_set_wwpn(lport, wwpn);
        }
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
                                       struct device *parent, int npiv)
 {
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
        struct net_device *netdev = fcoe->netdev;
        struct fc_lport *lport, *n_port;
        struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
        }
 
        /* Initialize the library */
-       rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+       rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
        if (rc) {
                FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
                                "interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 {
        struct fc_lport *lport;
        struct fcoe_rcv_info *fr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
        unsigned int cpu;
 
        fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
-       lport = fcoe->ctlr.lp;
+       ctlr = fcoe_to_ctlr(fcoe);
+       lport = ctlr->lp;
        if (unlikely(!lport)) {
                FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
                goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
        eh = eth_hdr(skb);
 
-       if (is_fip_mode(&fcoe->ctlr) &&
-           compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+       if (is_fip_mode(ctlr) &&
+           compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
                FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
                                eh->h_source);
                goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        unsigned int elen;              /* eth header, may include vlan */
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
        u8 sof, eof;
        struct fcoe_hdr *hp;
 
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        }
 
        if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
-           fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+           fcoe_ctlr_els_send(ctlr, lport, skb))
                return 0;
 
        sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
-       memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
-       if (fcoe->ctlr.map_dest)
+       memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+       if (ctlr->map_dest)
                memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
 
-       if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-               memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+       if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+               memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
        else
                memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
 static inline int fcoe_filter_frames(struct fc_lport *lport,
                                     struct fc_frame *fp)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
                return 0;
 
        fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
-       if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+       ctlr = fcoe_to_ctlr(fcoe);
+       if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
            ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
                FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
                return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
                                     ulong event, void *ptr)
 {
        struct dcb_app_type *entry = ptr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct net_device *netdev;
        struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
        if (!fcoe)
                return NOTIFY_OK;
 
+       ctlr = fcoe_to_ctlr(fcoe);
+
        if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
                prio = ffs(entry->app.priority) - 1;
        else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 
        if (entry->app.protocol == ETH_P_FIP ||
            entry->app.protocol == ETH_P_FCOE)
-               fcoe->ctlr.priority = prio;
+               ctlr->priority = prio;
 
        if (entry->app.protocol == ETH_P_FCOE) {
-               port = lport_priv(fcoe->ctlr.lp);
+               port = lport_priv(ctlr->lp);
                port->priority = prio;
        }
 
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 {
        struct fc_lport *lport = NULL;
        struct net_device *netdev = ptr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fcoe_port *port;
        struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
                if (fcoe->netdev == netdev) {
-                       lport = fcoe->ctlr.lp;
+                       ctlr = fcoe_to_ctlr(fcoe);
+                       lport = ctlr->lp;
                        break;
                }
        }
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
                break;
        case NETDEV_UNREGISTER:
                list_del(&fcoe->list);
-               port = lport_priv(fcoe->ctlr.lp);
+               port = lport_priv(ctlr->lp);
                queue_work(fcoe_wq, &port->destroy_work);
                goto out;
                break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
        fcoe_link_speed_update(lport);
 
        if (link_possible && !fcoe_link_ok(lport))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
-       else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+               fcoe_ctlr_link_up(ctlr);
+       else if (fcoe_ctlr_link_down(ctlr)) {
                stats = per_cpu_ptr(lport->dev_stats, get_cpu());
                stats->LinkFailureCount++;
                put_cpu();
@@ -2003,6 +2043,7 @@ out:
  */
 static int fcoe_disable(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        int rc = 0;
 
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
        rtnl_unlock();
 
        if (fcoe) {
-               fcoe_ctlr_link_down(&fcoe->ctlr);
-               fcoe_clean_pending_queue(fcoe->ctlr.lp);
+               ctlr = fcoe_to_ctlr(fcoe);
+               fcoe_ctlr_link_down(ctlr);
+               fcoe_clean_pending_queue(ctlr->lp);
        } else
                rc = -ENODEV;
 
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
  */
 static int fcoe_enable(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        int rc = 0;
 
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
        fcoe = fcoe_hostlist_lookup_port(netdev);
        rtnl_unlock();
 
-       if (!fcoe)
+       if (!fcoe) {
                rc = -ENODEV;
-       else if (!fcoe_link_ok(fcoe->ctlr.lp))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+               goto out;
+       }
+
+       ctlr = fcoe_to_ctlr(fcoe);
+
+       if (!fcoe_link_ok(ctlr->lp))
+               fcoe_ctlr_link_up(ctlr);
 
+out:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
  */
 static int fcoe_destroy(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
        struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
                rc = -ENODEV;
                goto out_nodev;
        }
-       lport = fcoe->ctlr.lp;
+       ctlr = fcoe_to_ctlr(fcoe);
+       lport = ctlr->lp;
        port = lport_priv(lport);
        list_del(&fcoe->list);
        queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
        int dcbx;
        u8 fup, up;
        struct net_device *netdev = fcoe->realdev;
-       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+       struct fcoe_port *port = lport_priv(ctlr->lp);
        struct dcb_app app = {
                                .priority = 0,
                                .protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
                }
 
                port->priority = ffs(up) ? ffs(up) - 1 : 0;
-               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+               ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
        }
 #endif
 }
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 {
        int rc = 0;
+       struct fcoe_ctlr_device *ctlr_dev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
 
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
                goto out_nodev;
        }
 
-       lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+       ctlr = fcoe_to_ctlr(fcoe);
+       ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+       lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
        if (IS_ERR(lport)) {
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        }
 
        /* Make this the "master" N_Port */
-       fcoe->ctlr.lp = lport;
+       ctlr->lp = lport;
 
        /* setup DCB priority attributes. */
        fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        fc_fabric_login(lport);
        if (!fcoe_link_ok(lport)) {
                rtnl_unlock();
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                mutex_unlock(&fcoe_config_mutex);
                return rc;
        }
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
        struct fc_lport *lport = shost_priv(shost);
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
-       fcoe_ctlr_link_down(&fcoe->ctlr);
-       fcoe_clean_pending_queue(fcoe->ctlr.lp);
-       if (!fcoe_link_ok(fcoe->ctlr.lp))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+       fcoe_ctlr_link_down(ctlr);
+       fcoe_clean_pending_queue(ctlr->lp);
+       if (!fcoe_link_ok(ctlr->lp))
+               fcoe_ctlr_link_up(ctlr);
        return 0;
 }
 
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
  */
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
 
        fcoe = fcoe_hostlist_lookup_port(netdev);
-       return (fcoe) ? fcoe->ctlr.lp : NULL;
+       ctlr = fcoe_to_ctlr(fcoe);
+       return (fcoe) ? ctlr->lp : NULL;
 }
 
 /**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
 static void __exit fcoe_exit(void)
 {
        struct fcoe_interface *fcoe, *tmp;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        unsigned int cpu;
 
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
        rtnl_lock();
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
                list_del(&fcoe->list);
-               port = lport_priv(fcoe->ctlr.lp);
+               ctlr = fcoe_to_ctlr(fcoe);
+               port = lport_priv(ctlr->lp);
                queue_work(fcoe_wq, &port->destroy_work);
        }
        rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        struct fc_frame_header *fh = fc_frame_header_get(fp);
 
        switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
        __fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = fcoe_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev =
+               fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+       fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
 /**
  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
  * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
        if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
-               fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+               fcoe_ctlr_recv_flogi(ctlr, lport, fp);
 }
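
With the fcoe_sysfs_function_template registered above, reading one of the new controller attributes flows through fcoe_sysfs into the driver callback, which refreshes the cached LESB counters before the value is printed. A simplified sketch of what the macro-generated show routine in fcoe_sysfs.c (further below) does; the real code is produced by fcoe_ctlr_rd_attr():

static ssize_t example_show_link_fail(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);

	/* let the LLD refresh ctlr_dev->lesb (fcoe.ko uses fcoe_ctlr_get_lesb) */
	if (ctlr_dev->f->get_fcoe_ctlr_link_fail)
		ctlr_dev->f->get_fcoe_ctlr_link_fail(ctlr_dev);

	return snprintf(buf, 20, "%u\n", ctlr_dev->lesb.lesb_link_fail);
}
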
index 96ac938d39ccc81bc8acb4f0dc34d3ac4de23bef..a624add4f8ecbae730b01f6daab8b8f88ba1ea4d 100644 (file)
@@ -68,7 +68,6 @@ do {                                                                  \
  * @netdev:          The associated net device
  * @fcoe_packet_type: FCoE packet type
  * @fip_packet_type:  FIP packet type
- * @ctlr:            The FCoE controller (for FIP)
  * @oem:             The offload exchange manager for all local port
  *                   instances associated with this port
  * @removed:         Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
        struct net_device  *realdev;
        struct packet_type fcoe_packet_type;
        struct packet_type fip_packet_type;
-       struct fcoe_ctlr   ctlr;
        struct fc_exch_mgr *oem;
        u8      removed;
 };
 
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x)                                                \
+       (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x)                      \
+       ((struct fcoe_interface *)((x) + 1))
 
 /**
  * fcoe_netdev() - Return the net device associated with a local port
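
fcoe_to_ctlr() and fcoe_from_ctlr() rely on the layout set up in fcoe_interface_create(): fcoe_ctlr_device_add() is asked for sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface) bytes of private space, so the controller and the interface sit back to back and each conversion is a single pointer step. A sketch of the resulting relationships (the helper name is illustrative):

static struct fcoe_interface *example_layout(struct fcoe_ctlr_device *ctlr_dev)
{
	/* the ctlr_device's private area starts with the fcoe_ctlr ... */
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
	/* ... immediately followed by the fcoe_interface */
	struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);

	/* fcoe_to_ctlr() steps back over exactly one struct fcoe_ctlr */
	BUG_ON(fcoe_to_ctlr(fcoe) != ctlr);
	return fcoe;
}
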
index 5a4c7250aa77abd218ea52c65da31fb514abfd30..d68d57241ee68227703ce1880e52c2ce31b4684d 100644 (file)
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+       struct fcoe_ctlr *fip = new->fip;
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+       struct fcoe_fcf_device temp, *fcf_dev;
+       int rc = 0;
+
+       LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+                       new->fabric_name, new->fcf_mac);
+
+       mutex_lock(&ctlr_dev->lock);
+
+       temp.fabric_name = new->fabric_name;
+       temp.switch_name = new->switch_name;
+       temp.fc_map = new->fc_map;
+       temp.vfid = new->vfid;
+       memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+       temp.priority = new->pri;
+       temp.fka_period = new->fka_period;
+       temp.selected = 0; /* default to unselected */
+
+       fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+       if (unlikely(!fcf_dev)) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * The fcoe_sysfs layer can return a CONNECTED fcf that
+        * has a priv (fcf was never deleted) or a CONNECTED fcf
+        * that doesn't have a priv (fcf was deleted). However,
+        * libfcoe will always delete FCFs before trying to add
+        * them. This is ensured because both recv_adv and
+        * age_fcfs are protected by the fcoe_ctlr's mutex.
+        * This means that we should never get a FCF with a
+        * non-NULL priv pointer.
+        */
+       BUG_ON(fcf_dev->priv);
+
+       fcf_dev->priv = new;
+       new->fcf_dev = fcf_dev;
+
+       list_add(&new->list, &fip->fcfs);
+       fip->fcf_count++;
+
+out:
+       mutex_unlock(&ctlr_dev->lock);
+       return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+       struct fcoe_ctlr *fip = new->fip;
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+       struct fcoe_fcf_device *fcf_dev;
+
+       list_del(&new->list);
+       fip->fcf_count--;
+
+       mutex_lock(&ctlr_dev->lock);
+
+       fcf_dev = fcoe_fcf_to_fcf_dev(new);
+       WARN_ON(!fcf_dev);
+       new->fcf_dev = NULL;
+       fcoe_fcf_device_delete(fcf_dev);
+       kfree(new);
+
+       mutex_unlock(&ctlr_dev->lock);
+}
+
 /**
  * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
  * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 
        fip->sel_fcf = NULL;
        list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
-               list_del(&fcf->list);
-               kfree(fcf);
+               fcoe_sysfs_fcf_del(fcf);
        }
-       fip->fcf_count = 0;
+       WARN_ON(fip->fcf_count);
+
        fip->sel_time = 0;
 }
 
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
        unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
        unsigned long deadline;
        unsigned long sel_time = 0;
+       struct list_head del_list;
        struct fcoe_dev_stats *stats;
 
+       INIT_LIST_HEAD(&del_list);
+
        stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
 
        list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                if (time_after_eq(jiffies, deadline)) {
                        if (fip->sel_fcf == fcf)
                                fip->sel_fcf = NULL;
+                       /*
+                        * Move to delete list so we can call
+                        * fcoe_sysfs_fcf_del (which can sleep)
+                        * after the put_cpu().
+                        */
                        list_del(&fcf->list);
-                       WARN_ON(!fip->fcf_count);
-                       fip->fcf_count--;
-                       kfree(fcf);
+                       list_add(&fcf->list, &del_list);
                        stats->VLinkFailureCount++;
                } else {
                        if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                }
        }
        put_cpu();
+
+       list_for_each_entry_safe(fcf, next, &del_list, list) {
+               /* Removes fcf from current list */
+               fcoe_sysfs_fcf_del(fcf);
+       }
+
        if (sel_time && !fip->sel_fcf && !fip->sel_time) {
                sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
                fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fcoe_fcf *fcf;
        struct fcoe_fcf new;
-       struct fcoe_fcf *found;
        unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
        int first = 0;
        int mtu_valid;
+       int found = 0;
+       int rc = 0;
 
        if (fcoe_ctlr_parse_adv(fip, skb, &new))
                return;
 
        mutex_lock(&fip->ctlr_mutex);
        first = list_empty(&fip->fcfs);
-       found = NULL;
        list_for_each_entry(fcf, &fip->fcfs, list) {
                if (fcf->switch_name == new.switch_name &&
                    fcf->fabric_name == new.fabric_name &&
                    fcf->fc_map == new.fc_map &&
                    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
-                       found = fcf;
+                       found = 1;
                        break;
                }
        }
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                if (!fcf)
                        goto out;
 
-               fip->fcf_count++;
                memcpy(fcf, &new, sizeof(new));
-               list_add(&fcf->list, &fip->fcfs);
+               fcf->fip = fip;
+               rc = fcoe_sysfs_fcf_add(fcf);
+               if (rc) {
+                       printk(KERN_ERR "Failed to allocate sysfs instance "
+                              "for FCF, fab %16.16llx mac %pM\n",
+                              new.fabric_name, new.fcf_mac);
+                       kfree(fcf);
+                       goto out;
+               }
        } else {
                /*
                 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                fcf->fka_period = new.fka_period;
                memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
        }
+
        mtu_valid = fcoe_ctlr_mtu_valid(fcf);
        fcf->time = jiffies;
        if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                    time_before(fip->sel_time, fip->timer.expires))
                        mod_timer(&fip->timer, fip->sel_time);
        }
+
 out:
        mutex_unlock(&fip->ctlr_mutex);
 }
@@ -2718,9 +2809,9 @@ unlock:
 
 /**
  * fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport:    The local port to configure libfc for
+ * @fip:      The FCoE controller in use by the local port
+ * @tt:       The libfc function template
  * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
  *
  * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
        return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fcoe_fcf *fcf;
+
+       mutex_lock(&fip->ctlr_mutex);
+       mutex_lock(&ctlr_dev->lock);
+
+       fcf = fcoe_fcf_device_priv(fcf_dev);
+       if (fcf)
+               fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+       else
+               fcf_dev->selected = 0;
+
+       mutex_unlock(&ctlr_dev->lock);
+       mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+       mutex_lock(&ctlr->ctlr_mutex);
+       switch (ctlr->mode) {
+       case FIP_MODE_FABRIC:
+               ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+               break;
+       case FIP_MODE_VN2VN:
+               ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+               break;
+       default:
+               ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+               break;
+       }
+       mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
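
fcoe_sysfs_fcf_del() takes the fcoe_ctlr_device mutex and can sleep, so the aging path above no longer frees FCFs while the per-CPU stats section is active: expired entries are only moved onto a local list between get_cpu() and put_cpu(), and the actual deletion runs afterwards. A generic sketch of that pattern (not the libfcoe code itself):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	unsigned long deadline;
};

static void age_items(struct list_head *live)
{
	struct item *it, *next;
	LIST_HEAD(del_list);

	preempt_disable();			/* stands in for get_cpu() */
	list_for_each_entry_safe(it, next, live, list)
		if (time_after_eq(jiffies, it->deadline))
			list_move(&it->list, &del_list);	/* no freeing here */
	preempt_enable();			/* stands in for put_cpu() */

	/* now it is safe to run cleanup that may sleep */
	list_for_each_entry_safe(it, next, &del_list, list) {
		list_del(&it->list);
		kfree(it);
	}
}
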
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644 (file)
index 0000000..2bc1631
--- /dev/null
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800;  /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+                  uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+                "Maximum number of seconds that libfcoe should"
+                " insulate the loss of a fcf. Once this value is"
+                " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x)                                \
+       ((x)->id)
+#define fcoe_ctlr_work_q_name(x)               \
+       ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x)                    \
+       ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x)       \
+       ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x)            \
+       ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x)                      \
+       ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x)          \
+       ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x)                 \
+       ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x)                        \
+       ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x)                  \
+       ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x)                  \
+       ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x)                 \
+       ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x)                 \
+       ((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x)                      \
+       ((x)->state)
+#define fcoe_fcf_fabric_name(x)                        \
+       ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x)                        \
+       ((x)->switch_name)
+#define fcoe_fcf_fc_map(x)                     \
+       ((x)->fc_map)
+#define fcoe_fcf_vfid(x)                       \
+       ((x)->vfid)
+#define fcoe_fcf_mac(x)                                \
+       ((x)->mac)
+#define fcoe_fcf_priority(x)                   \
+       ((x)->priority)
+#define fcoe_fcf_fka_period(x)                 \
+       ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x)               \
+       ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x)                   \
+       ((x)->selected)
+#define fcoe_fcf_vlan_id(x)                    \
+       ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+       int ret;
+
+       ret = kstrtoul(buf, 0, val);
+       if (ret || *val < 0)
+               return -EINVAL;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (*val > UINT_MAX)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+                                    unsigned long val)
+{
+       if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+           (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+           (fcf->state == FCOE_FCF_STATE_DELETED))
+               return -EBUSY;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (val > UINT_MAX)
+               return -EINVAL;
+
+       fcoe_fcf_dev_loss_tmo(fcf) = val;
+       return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+       __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast)        \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+                                           struct device_attribute *attr, \
+                                           char *buf)                  \
+{                                                                      \
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);               \
+       if (ctlr->f->get_fcoe_ctlr_##field)                             \
+               ctlr->f->get_fcoe_ctlr_##field(ctlr);                   \
+       return snprintf(buf, sz, format_string,                         \
+                       cast fcoe_ctlr_##field(ctlr));                  \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,        \
+                                          struct device_attribute *attr, \
+                                          char *buf)                   \
+{                                                                      \
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);                  \
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);  \
+       if (ctlr->f->get_fcoe_fcf_##field)                              \
+               ctlr->f->get_fcoe_fcf_##field(fcf);                     \
+       return snprintf(buf, sz, format_string,                         \
+                       cast fcoe_fcf_##field(fcf));                    \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast)        \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+                                           struct device_attribute *attr, \
+                                           char *buf)                  \
+{                                                                      \
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);               \
+       return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,        \
+                                          struct device_attribute *attr, \
+                                          char *buf)                   \
+{                                                              \
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);                  \
+       return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz)            \
+       fcoe_ctlr_private_show_function(field, format_string, sz, )     \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz)                    \
+       fcoe_ctlr_show_function(field, format_string, sz, )             \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz)                     \
+       fcoe_fcf_show_function(field, format_string, sz, )              \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz)             \
+       fcoe_fcf_private_show_function(field, format_string, sz, )      \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+       fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast)  \
+       fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table)                        \
+static const char *get_fcoe_##title##_name(enum table_type table_key)  \
+{                                                                      \
+       int i;                                                          \
+       char *name = NULL;                                              \
+                                                                       \
+       for (i = 0; i < ARRAY_SIZE(table); i++) {                       \
+               if (table[i].value == table_key) {                      \
+                       name = table[i].name;                           \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+       return name;                                                    \
+}
+
+static struct {
+       enum fcf_state value;
+       char           *name;
+} fcf_state_names[] = {
+       { FCOE_FCF_STATE_UNKNOWN,      "Unknown" },
+       { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+       { FCOE_FCF_STATE_CONNECTED,    "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       const char *name;
+       name = get_fcoe_fcf_state_name(fcf->state);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+       enum fip_conn_type value;
+       char               *name;
+} fip_conn_type_names[] = {
+       { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+       { FIP_CONN_TYPE_FABRIC, "Fabric" },
+       { FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_mode(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       const char *name;
+
+       if (ctlr->f->get_fcoe_ctlr_mode)
+               ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+       name = get_fcoe_ctlr_mode_name(ctlr->mode);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+                       "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+                       show_ctlr_mode, NULL);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       struct fcoe_fcf_device *fcf;
+       unsigned long val;
+       int rc;
+
+       rc = fcoe_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+       mutex_lock(&ctlr->lock);
+       list_for_each_entry(fcf, &ctlr->fcfs, peers)
+               fcoe_fcf_set_dev_loss_tmo(fcf, val);
+       mutex_unlock(&ctlr->lock);
+       return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+                       show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+                       store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       unsigned long val;
+       int rc;
+
+       rc = fcoe_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+       if (rc)
+               return rc;
+       return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+                       show_fcoe_fcf_device_dev_loss_tmo,
+                       store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+       &device_attr_fcoe_ctlr_link_fail.attr,
+       &device_attr_fcoe_ctlr_vlink_fail.attr,
+       &device_attr_fcoe_ctlr_miss_fka.attr,
+       &device_attr_fcoe_ctlr_symb_err.attr,
+       &device_attr_fcoe_ctlr_err_block.attr,
+       &device_attr_fcoe_ctlr_fcs_error.attr,
+       NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+       .name = "lesb",
+       .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+       &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_ctlr_mode.attr,
+       NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+       .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+       &fcoe_ctlr_attr_group,
+       &fcoe_ctlr_lesb_attr_group,
+       NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+       &device_attr_fcoe_fcf_fabric_name.attr,
+       &device_attr_fcoe_fcf_switch_name.attr,
+       &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_fcf_fc_map.attr,
+       &device_attr_fcoe_fcf_vfid.attr,
+       &device_attr_fcoe_fcf_mac.attr,
+       &device_attr_fcoe_fcf_priority.attr,
+       &device_attr_fcoe_fcf_fka_period.attr,
+       &device_attr_fcoe_fcf_state.attr,
+       &device_attr_fcoe_fcf_selected.attr,
+       &device_attr_fcoe_fcf_vlan_id.attr,
+       NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+       .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+       &fcoe_fcf_attr_group,
+       NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+                         struct device_driver *drv)
+{
+       if (dev->bus == &fcoe_bus_type)
+               return 1;
+       return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+       .name = "fcoe_ctlr",
+       .groups = fcoe_ctlr_attr_groups,
+       .release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+       .name = "fcoe_fcf",
+       .groups = fcoe_fcf_attr_groups,
+       .release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+       .name = "fcoe",
+       .match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+       if (!fcoe_ctlr_work_q(ctlr)) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to flush work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+               return;
+       }
+
+       flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the workqueue
+ * @work:   Work to queue for execution
+ *
+ * Return value:
+ *     1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+                              struct work_struct *work)
+{
+       if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to queue work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+
+               return -EINVAL;
+       }
+
+       return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+       if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to flush work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+               return;
+       }
+
+       flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work:   Work to queue for execution
+ * @delay:  jiffies to delay the work queuing
+ *
+ * Return value:
+ *     1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+                                      struct delayed_work *work,
+                                      unsigned long delay)
+{
+       if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to queue work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+
+               return -EINVAL;
+       }
+
+       return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+                                struct fcoe_fcf_device *old)
+{
+       if (new->switch_name == old->switch_name &&
+           new->fabric_name == old->fabric_name &&
+           new->fc_map == old->fc_map &&
+           compare_ether_addr(new->mac, old->mac) == 0)
+               return 1;
+       return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent:    The parent device to which the fcoe_ctlr instance
+ *             should be attached
+ * @f:         The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+                                   struct fcoe_sysfs_function_template *f,
+                                   int priv_size)
+{
+       struct fcoe_ctlr_device *ctlr;
+       int error = 0;
+
+       ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+                      GFP_KERNEL);
+       if (!ctlr)
+               goto out;
+
+       ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+       ctlr->f = f;
+       INIT_LIST_HEAD(&ctlr->fcfs);
+       mutex_init(&ctlr->lock);
+       ctlr->dev.parent = parent;
+       ctlr->dev.bus = &fcoe_bus_type;
+       ctlr->dev.type = &fcoe_ctlr_device_type;
+
+       ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+       snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+                "ctlr_wq_%d", ctlr->id);
+       ctlr->work_q = create_singlethread_workqueue(
+               ctlr->work_q_name);
+       if (!ctlr->work_q)
+               goto out_del;
+
+       snprintf(ctlr->devloss_work_q_name,
+                sizeof(ctlr->devloss_work_q_name),
+                "ctlr_dl_wq_%d", ctlr->id);
+       ctlr->devloss_work_q = create_singlethread_workqueue(
+               ctlr->devloss_work_q_name);
+       if (!ctlr->devloss_work_q)
+               goto out_del_q;
+
+       dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+       error = device_register(&ctlr->dev);
+       if (error)
+               goto out_del_q2;
+
+       return ctlr;
+
+out_del_q2:
+       destroy_workqueue(ctlr->devloss_work_q);
+       ctlr->devloss_work_q = NULL;
+out_del_q:
+       destroy_workqueue(ctlr->work_q);
+       ctlr->work_q = NULL;
+out_del:
+       kfree(ctlr);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * (workqueues) are freed, but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind: they need
+ * to clean up their LLD data for every fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+       struct fcoe_fcf_device *fcf, *next;
+       /* Remove any attached fcfs */
+       mutex_lock(&ctlr->lock);
+       list_for_each_entry_safe(fcf, next,
+                                &ctlr->fcfs, peers) {
+               list_del(&fcf->peers);
+               fcf->state = FCOE_FCF_STATE_DELETED;
+               fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+       }
+       mutex_unlock(&ctlr->lock);
+
+       fcoe_ctlr_device_flush_work(ctlr);
+
+       destroy_workqueue(ctlr->devloss_work_q);
+       ctlr->devloss_work_q = NULL;
+       destroy_workqueue(ctlr->work_q);
+       ctlr->work_q = NULL;
+
+       device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+       struct fcoe_fcf_device *fcf =
+               container_of(work, struct fcoe_fcf_device, delete_work);
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+       /*
+        * Cancel any outstanding timers. These should really exist
+        * only when rmmod'ing the LLDD and we're asking for
+        * immediate termination of the rports
+        */
+       if (!cancel_delayed_work(&fcf->dev_loss_work))
+               fcoe_ctlr_device_flush_devloss(ctlr);
+
+       device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+       struct fcoe_fcf_device *fcf =
+               container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+       mutex_lock(&ctlr->lock);
+
+       /*
+        * If the fcf is deleted or reconnected before the timer
+        * fires, the devloss queue will be flushed, but the state will
+        * be either CONNECTED or DELETED. If that is the case, we
+        * cancel deleting the fcf.
+        */
+       if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+               goto out;
+
+       dev_printk(KERN_ERR, &fcf->dev,
+                  "FIP fcf connection time out: removing fcf\n");
+
+       list_del(&fcf->peers);
+       fcf->state = FCOE_FCF_STATE_DELETED;
+       fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+       mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+       int timeout = fcf->dev_loss_tmo;
+
+       if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+               return;
+
+       fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+       /*
+        * FCF will only be re-connected by the LLD calling
+        * fcoe_fcf_device_add, and it should be setting up
+        * priv then.
+        */
+       fcf->priv = NULL;
+
+       fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+                                          timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr:    The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+                                           struct fcoe_fcf_device *new_fcf)
+{
+       struct fcoe_fcf_device *fcf;
+       int error = 0;
+
+       list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+               if (fcoe_fcf_device_match(new_fcf, fcf)) {
+                       if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+                               return fcf;
+
+                       fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+                       if (!cancel_delayed_work(&fcf->dev_loss_work))
+                               fcoe_ctlr_device_flush_devloss(ctlr);
+
+                       return fcf;
+               }
+       }
+
+       fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+       if (unlikely(!fcf))
+               goto out;
+
+       INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+       INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+       fcf->dev.parent = &ctlr->dev;
+       fcf->dev.bus = &fcoe_bus_type;
+       fcf->dev.type = &fcoe_fcf_device_type;
+       fcf->id = atomic_inc_return(&fcf_num) - 1;
+       fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+       fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+       dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+       fcf->fabric_name = new_fcf->fabric_name;
+       fcf->switch_name = new_fcf->switch_name;
+       fcf->fc_map = new_fcf->fc_map;
+       fcf->vfid = new_fcf->vfid;
+       memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+       fcf->priority = new_fcf->priority;
+       fcf->fka_period = new_fcf->fka_period;
+       fcf->selected = new_fcf->selected;
+
+       error = device_register(&fcf->dev);
+       if (error)
+               goto out_del;
+
+       fcf->state = FCOE_FCF_STATE_CONNECTED;
+       list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+       return fcf;
+
+out_del:
+       kfree(fcf);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+       int error;
+
+       atomic_set(&ctlr_num, 0);
+       atomic_set(&fcf_num, 0);
+
+       error = bus_register(&fcoe_bus_type);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+       bus_unregister(&fcoe_bus_type);
+}
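
For orientation, here is a minimal sketch of how a low-level driver might consume the fcoe_sysfs interface added above. The fcoe_ctlr_device_*/fcoe_fcf_device_* calls, the ctlr->lock rule and the fcoe_fcf_device fields come from the code in this patch; the header path, the template and priv structures, the WWN/FC-MAP values and the my_* function names are illustrative assumptions, not part of the patch.

#include <linux/mutex.h>
#include <scsi/fcoe_sysfs.h>            /* assumed location of the new declarations */

struct my_lld_priv {                    /* hypothetical LLD private data */
        int example;
};

static struct fcoe_sysfs_function_template my_fcoe_templ;       /* hypothetical, left empty */

static struct fcoe_ctlr_device *my_lld_attach(struct device *parent)
{
        struct fcoe_ctlr_device *ctlr;
        struct fcoe_fcf_device temp = {         /* lookup key only; values are made up */
                .fabric_name = 0x100000c0dd000001ULL,
                .switch_name = 0x200000c0dd000001ULL,
                .fc_map      = 0x0efc00,
        };
        struct fcoe_fcf_device *fcf;

        /* Allocates the ctlr plus priv_size bytes for the LLD, creates its
         * work/devloss workqueues and registers it on the "fcoe" bus. */
        ctlr = fcoe_ctlr_device_add(parent, &my_fcoe_templ,
                                    sizeof(struct my_lld_priv));
        if (!ctlr)
                return NULL;

        /* fcoe_fcf_device_add() expects ctlr->lock to be held; it either
         * revives a matching disconnected FCF or registers a new fcf_%d node. */
        mutex_lock(&ctlr->lock);
        fcf = fcoe_fcf_device_add(ctlr, &temp);
        mutex_unlock(&ctlr->lock);
        if (!fcf) {
                fcoe_ctlr_device_delete(ctlr);
                return NULL;
        }

        return ctlr;
}

static void my_lld_detach(struct fcoe_ctlr_device *ctlr)
{
        /* Individual FCFs can be aged out early with fcoe_fcf_device_delete();
         * deleting the ctlr queues delete work for every remaining FCF, flushes
         * the workqueues and unregisters the ctlr from sysfs. */
        fcoe_ctlr_device_delete(ctlr);
}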
index 710e149d41b60b0e1f087acce76b9b074953eb15..b46f43dced78eb6b77337905ff20eeb16d451a0c 100644 (file)
@@ -815,9 +815,17 @@ out_nodev:
  */
 static int __init libfcoe_init(void)
 {
-       fcoe_transport_init();
+       int rc = 0;
 
-       return 0;
+       rc = fcoe_transport_init();
+       if (rc)
+               return rc;
+
+       rc = fcoe_sysfs_setup();
+       if (rc)
+               fcoe_transport_exit();
+
+       return rc;
 }
 module_init(libfcoe_init);
 
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
  */
 static void __exit libfcoe_exit(void)
 {
+       fcoe_sysfs_teardown();
        fcoe_transport_exit();
 }
 module_exit(libfcoe_exit);
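
The libfcoe_init()/libfcoe_exit() change above follows the usual two-stage module init pattern: if a later step fails, roll back the step that already succeeded, and tear down in reverse order on exit. A self-contained sketch of the same pattern, with step_a/step_b as hypothetical stand-ins (not libfcoe symbols):

#include <linux/init.h>
#include <linux/module.h>

/* Placeholder sub-initializers standing in for calls such as
 * fcoe_transport_init() and fcoe_sysfs_setup(); bodies are dummies. */
static int  step_a_init(void) { return 0; }
static void step_a_exit(void) { }
static int  step_b_init(void) { return 0; }
static void step_b_exit(void) { }

static int __init example_init(void)
{
        int rc;

        rc = step_a_init();
        if (rc)
                return rc;

        rc = step_b_init();
        if (rc)
                step_a_exit();  /* undo the step that already succeeded */

        return rc;
}

static void __exit example_exit(void)
{
        /* Tear down in the reverse order of initialization. */
        step_b_exit();
        step_a_exit();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");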
index 441d88ad99a7bb3abadb8b1e9af25281ced8334b..d109cc3a17b64126ee9ff38a5da320a070ffc2c5 100644 (file)
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-               ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+               memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
                if (!link->sactive) {
-                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
-                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_FAILED;
                }
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
                                qc->flags |= ATA_QCFLAG_FAILED;
                        }
 
-                       dev->sata_dev.tf.feature = 0x04; /* status err */
-                       dev->sata_dev.tf.command = ATA_ERR;
+                       dev->sata_dev.fis[3] = 0x04; /* status err */
+                       dev->sata_dev.fis[2] = ATA_ERR;
                }
        }
 
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
        struct domain_device *dev = qc->ap->private_data;
 
-       memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+       ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
        return true;
 }
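
For reference, the libsas change above stops caching a decoded struct ata_taskfile and instead keeps the raw D2H Register FIS, decoding it only when the result taskfile is actually requested. A small sketch of that decode step; the helper name and buffer are illustrative, while ata_tf_from_fis(), ac_err_mask() and the FIS byte offsets are standard libata/SATA facts:

#include <linux/ata.h>
#include <linux/libata.h>

/* Hypothetical helper: interpret a received D2H Register FIS the way the
 * patched completion path does. */
static unsigned int example_fis_err_mask(const u8 *fis, struct ata_taskfile *tf)
{
        /* In a D2H Register FIS, byte 2 is the ATA status register and
         * byte 3 is the error register, hence the fis[2]/fis[3] reads in
         * sas_ata_task_done() above. */
        u8 status = fis[2];

        /* Full taskfile reconstruction is deferred to qc_fill_rtf time. */
        ata_tf_from_fis(fis, tf);

        return ac_err_mask(status);
}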
 
index 6102ef2cb2d863ff23712d2cc5e6db2032f878bb..9d46fcbe7755fd2aa258e3de91c5a8cd76e3079c 100644 (file)
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
 static inline u8
 _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
 {
-       return ioc->cpu_msix_table[smp_processor_id()];
+       return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
 /**
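
The one-line mpt2sas change above swaps smp_processor_id() for raw_smp_processor_id(): under CONFIG_DEBUG_PREEMPT the former warns when called from preemptible context, while the latter does not, and here the CPU number is only a hint for picking an MSI-X reply queue, so an occasionally stale value is harmless. A hedged sketch of the two options (hypothetical helpers, not driver code):

#include <linux/smp.h>
#include <linux/types.h>

/* Hint only: a stale CPU number just means a slightly less local reply
 * queue, so the raw variant is fine and avoids the DEBUG_PREEMPT warning
 * in preemptible context. */
static u8 example_pick_msix_index(const u8 *cpu_msix_table)
{
        return cpu_msix_table[raw_smp_processor_id()];
}

/* If the exact current CPU mattered, preemption would have to be held off
 * around the lookup instead: */
static u8 example_pick_msix_index_strict(const u8 *cpu_msix_table)
{
        unsigned int cpu = get_cpu();   /* disables preemption */
        u8 idx = cpu_msix_table[cpu];

        put_cpu();
        return idx;
}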
index 6208d562890d1cb3cfb3c85845dddf02086c4fb2..317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6 100644 (file)
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
        Firmware images can be retrieved from:
 
                ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+       tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+       depends on SCSI_QLA_FC && TARGET_CORE
+       select LIBFC
+       select BTREE
+       default n
+       ---help---
+       Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
index 5df782f4a097f9cd8b32455319f3894a5879cd06..dce7d788cdc9c7999795dac0e9819e81943aa223 100644 (file)
@@ -1,5 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
                qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o
+        qla_nx.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
index 5926f5a87ea8e97b0611d246ea99b96b1f6c7b6c..5ab953029f8d1412ee66057c641771bf69d765cd 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                scsi_block_requests(vha->host);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                if (IS_QLA82XX(ha)) {
+                       ha->flags.isp82xx_no_md_cap = 1;
                        qla82xx_idc_lock(ha);
                        qla82xx_set_reset_owner(vha);
                        qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                scsi_unblock_requests(vha->host);
                break;
        case 0x2025d:
-               if (!IS_QLA81XX(ha))
+               if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                        return -EPERM;
 
                ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-       struct qla_hw_data *ha = vha->hw;
        return snprintf(buf, PAGE_SIZE, "%d\n",
-           ha->qla_stats.total_isp_aborts);
+           vha->qla_stats.total_isp_aborts);
 }
 
 static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return snprintf(buf, PAGE_SIZE, "\n");
 
        return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
        dma_addr_t stats_dma;
        struct fc_host_statistics *pfc_host_stat;
 
-       pfc_host_stat = &ha->fc_host_stat;
+       pfc_host_stat = &vha->fc_host_stat;
        memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
 
        if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
                pfc_host_stat->dumped_frames = stats->dumped_frames;
                pfc_host_stat->nos_count = stats->nos_rcvd;
        }
-       pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
-       pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+       pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+       pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
 
 done_free:
         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        fc_host_supported_speeds(vha->host) =
                fc_host_supported_speeds(base_vha->host);
 
+       qlt_vport_create(vha, ha);
        qla24xx_vport_disable(fc_vport, disable);
 
        if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
        fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
        fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
        fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
-       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+       fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+                       (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
        fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
        fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
        if (IS_CNA_CAPABLE(ha))
                speed = FC_PORTSPEED_10GBIT;
+       else if (IS_QLA2031(ha))
+               speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+                   FC_PORTSPEED_4GBIT;
        else if (IS_QLA25XX(ha))
                speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
                    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
index bc3cc6d91117ab58b103e889370c89f8402bd6f9..c68883806c54b8092e81ff82f62db06fd8ee822f 100644 (file)
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 
                /* Initialize all required  fields of fcport */
                fcport->vha = vha;
-               fcport->vp_idx = vha->vp_idx;
                fcport->d_id.b.al_pa =
                        bsg_job->request->rqst_data.h_els.port_id[0];
                fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 
        /* Initialize all required  fields of fcport */
        fcport->vha = vha;
-       fcport->vp_idx = vha->vp_idx;
        fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
        fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
        fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        int rval = 0;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                goto done_set_internal;
 
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        uint16_t new_config[4];
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                goto done_reset_internal;
 
        memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
        elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
        if ((ha->current_topology == ISP_CFG_F ||
-           (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
-           ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+           ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
            le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
            && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
                elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
        if (rval)
                return rval;
 
+       /* Set the isp82xx_no_md_cap not to capture minidump */
+       ha->flags.isp82xx_no_md_cap = 1;
+
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);
index 62324a1d55737b7a127bcc5bab74ebac66ab62aa..fdee5611f3e2afce938c7f7ad559ce5d8eecffe9 100644 (file)
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0120       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x113e       | 0x112c-0x112e  |
+ * | Module Init and Probe        |       0x0122       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1140       | 0x111a-0x111b  |
+ * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
  * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
  * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401c       |               |
- * | Async Events                 |       0x505d       | 0x502b-0x502f  |
+ * | DPC Thread                   |       0x401c       | 0x4002,0x4013  |
+ * | Async Events                 |       0x505f       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
- * | Timer Routines               |       0x6011       | 0x600e-0x600f  |
+ * | Timer Routines               |       0x6011       |                |
  * | User Space Interactions      |       0x709f       | 0x7018,0x702e, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
  * |                              |                    | 0x708c         |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
- * | AER/EEH                      |       0x900f       |               |
+ * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb054       | 0xb053         |
+ * | ISP82XX Specific             |       0xb054       | 0xb024         |
  * | MultiQ                       |       0xc00c       |               |
  * | Misc                         |       0xd010       |               |
 * | Target Mode                  |       0xe06f       |                |
 * | Target Mode Management       |       0xf071       |                |
 * | Target Mode Task Management  |      0x1000b       |                |
  * ----------------------------------------------------------------------
  */
 
@@ -378,6 +382,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
        return (char *)iter_reg + ntohl(fcec->size);
 }
 
+static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+       uint32_t **last_chain)
+{
+       struct qla2xxx_mqueue_chain *q;
+       struct qla2xxx_mqueue_header *qh;
+       uint32_t num_queues;
+       int que;
+       struct {
+               int length;
+               void *ring;
+       } aq, *aqp;
+
+       if (!ha->tgt.atio_q_length)
+               return ptr;
+
+       num_queues = 1;
+       aqp = &aq;
+       aqp->length = ha->tgt.atio_q_length;
+       aqp->ring = ha->tgt.atio_ring;
+
+       for (que = 0; que < num_queues; que++) {
+               /* aqp = ha->atio_q_map[que]; */
+               q = ptr;
+               *last_chain = &q->type;
+               q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+               q->chain_size = htonl(
+                   sizeof(struct qla2xxx_mqueue_chain) +
+                   sizeof(struct qla2xxx_mqueue_header) +
+                   (aqp->length * sizeof(request_t)));
+               ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+               /* Add header. */
+               qh = ptr;
+               qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+               qh->number = htonl(que);
+               qh->size = htonl(aqp->length * sizeof(request_t));
+               ptr += sizeof(struct qla2xxx_mqueue_header);
+
+               /* Add data. */
+               memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+               ptr += aqp->length * sizeof(request_t);
+       }
+
+       return ptr;
+}
+
 static inline void *
 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        struct qla24xx_fw_dump *fw;
        uint32_t        ext_mem_cnt;
        void            *nxt;
+       void            *nxt_chain;
+       uint32_t        *last_chain = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
        if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        qla24xx_copy_eft(ha, nxt);
 
+       nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+       if (last_chain) {
+               ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+               *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+       }
+
+       /* Adjust valid length. */
+       ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
 qla24xx_fw_dump_failed_0:
        qla2xxx_dump_post_process(base_vha, rval);
 
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
index 2157bdf1569a87e3771efbe9a8dcc26833ce47ab..f278df8cce0f02988e95f85e3f65d82381150f47 100644 (file)
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
        uint32_t queue;
 #define TYPE_REQUEST_QUEUE     0x1
 #define TYPE_RESPONSE_QUEUE    0x2
+#define TYPE_ATIO_QUEUE                0x3
        uint32_t number;
        uint32_t size;
 };
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_misc    0x00010000 /* For dumping everything that is
                                    * not covered by upper categories
                                    */
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
+                                   * This is to be used with other levels where
+                                   * more verbosity is required. It might not
+                                   * be applicable to all the levels.
+                                   */
+#define ql_dbg_tgt     0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
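
The three new masks above extend the driver's existing ql_dbg() level scheme to the target-mode code. A brief illustration of the intended usage, assuming the qla2xxx internal headers; the helper name and the message ids (0xe001, 0xf001, 0x10001, chosen to sit in the ranges the qla_dbg.c table reserves for target mode) are made up:

/* Assumes the driver's qla_def.h / qla_dbg.h are already included. */
static void example_trace_target_mode(scsi_qla_host_t *vha)
{
        ql_dbg(ql_dbg_tgt,     vha, 0xe001,  "example target mode event\n");
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,  "example target mode management event\n");
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, "example target mode task management event\n");
}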
index a2443031dbe76c68c617f26767e269105f075343..39007f53aec0284b9cd855592d786823bbad5786 100644 (file)
 #define RESPONSE_ENTRY_CNT_2100                64      /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300                512     /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ          128     /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX            4096    /* Number of ATIO entries. */
 
 struct req_que;
 
@@ -1234,11 +1235,27 @@ typedef struct {
  * ISP queue - response queue entry definition.
  */
 typedef struct {
-       uint8_t         data[60];
+       uint8_t         entry_type;             /* Entry type. */
+       uint8_t         entry_count;            /* Entry count. */
+       uint8_t         sys_define;             /* System defined. */
+       uint8_t         entry_status;           /* Entry Status. */
+       uint32_t        handle;                 /* System defined handle */
+       uint8_t         data[52];
        uint32_t        signature;
 #define RESPONSE_PROCESSED     0xDEADDEAD      /* Signature */
 } response_t;
 
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+       uint8_t         entry_type;             /* Entry type. */
+       uint8_t         entry_count;            /* Entry count. */
+       uint8_t         data[58];
+       uint32_t        signature;
+#define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
+};
+
 typedef union {
        uint16_t extended;
        struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
        struct fc_rport *rport, *drport;
        u32 supported_classes;
 
-       uint16_t vp_idx;
        uint8_t fc4_type;
        uint8_t scan_state;
 } fc_port_t;
 
+#define QLA_FCPORT_SCAN_NONE   0
+#define QLA_FCPORT_SCAN_FOUND  1
+
 /*
  * Fibre channel port/lun states.
  */
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
 #define FCF_LOGIN_NEEDED       BIT_1
 #define FCF_FCP2_DEVICE                BIT_2
 #define FCF_ASYNC_SENT         BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID          0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
        uint32_t len;
 };
 
+struct qlt_hw_data {
+       /* Protected by hw lock */
+       uint32_t enable_class_2:1;
+       uint32_t enable_explicit_conf:1;
+       uint32_t ini_mode_force_reverse:1;
+       uint32_t node_name_set:1;
+
+       dma_addr_t atio_dma;    /* Physical address. */
+       struct atio *atio_ring; /* Base virtual address */
+       struct atio *atio_ring_ptr;     /* Current address. */
+       uint16_t atio_ring_index; /* Current index. */
+       uint16_t atio_q_length;
+
+       void *target_lport_ptr;
+       struct qla_tgt_func_tmpl *tgt_ops;
+       struct qla_tgt *qla_tgt;
+       struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+       uint16_t current_handle;
+
+       struct qla_tgt_vp_map *tgt_vp_map;
+       struct mutex tgt_mutex;
+       struct mutex tgt_host_action_mutex;
+
+       int saved_set;
+       uint16_t saved_exchange_count;
+       uint32_t saved_firmware_options_1;
+       uint32_t saved_firmware_options_2;
+       uint32_t saved_firmware_options_3;
+       uint8_t saved_firmware_options[2];
+       uint8_t saved_add_firmware_options[2];
+
+       uint8_t tgt_node_name[WWN_SIZE];
+};
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
                uint32_t        thermal_supported:1;
                uint32_t        isp82xx_reset_hdlr_active:1;
                uint32_t        isp82xx_reset_owner:1;
-               /* 28 bits */
+               uint32_t        isp82xx_no_md_cap:1;
+               uint32_t        host_shutting_down:1;
+               /* 30 bits */
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
                                        /* ISP2322: red, green, amber. */
        uint16_t        zio_mode;
        uint16_t        zio_timer;
-       struct fc_host_statistics fc_host_stat;
 
        struct qla_msix_entry *msix_entries;
 
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
        int             cur_vport_count;
 
        struct qla_chip_state_84xx *cs84xx;
-       struct qla_statistics qla_stats;
        struct isp_operations *isp_ops;
        struct workqueue_struct *wq;
        struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
        dma_addr_t      md_tmplt_hdr_dma;
        void            *md_dump;
        uint32_t        md_dump_size;
+
+       struct qlt_hw_data tgt;
 };
 
 /*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
 #define FCOE_CTX_RESET_NEEDED  18      /* Initiate FCoE context reset */
 #define MPI_RESET_NEEDED       19      /* Initiate MPI FW reset */
 #define ISP_QUIESCE_NEEDED     20      /* Driver need some quiescence */
+#define SCR_PENDING            21      /* SCR in target mode */
 
        uint32_t        device_flags;
 #define SWITCH_FOUND           BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
+       struct fc_host_statistics fc_host_stat;
+       struct qla_statistics qla_stats;
 
        atomic_t        vref_count;
 } scsi_qla_host_t;
 
+#define SET_VP_IDX     1
+#define SET_AL_PA      2
+#define RESET_VP_IDX   3
+#define RESET_AL_PA    4
+struct qla_tgt_vp_map {
+       uint8_t idx;
+       scsi_qla_host_t *vha;
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
index 9f065804bd12b830a458ab11b6a92c7936f0bc58..9eacd2df111b85108dd8b3e75c3ea0320ec927b5 100644 (file)
@@ -175,6 +175,7 @@ extern int  qla2x00_vp_abort_isp(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
+
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
 
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -238,6 +241,9 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
 extern int
 qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
 
+extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
 extern int
 qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
 
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
 /*
  * Global Function Prototypes in qla_sup.c source file.
  */
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
 extern void qla2x00_sp_timeout(unsigned long);
 extern void qla2x00_bsg_job_done(void *, void *, int);
 extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
 
 /* Interrupt related */
 extern irqreturn_t qla82xx_intr_handler(int, void *);
index 3128f80441f5378090156e98edbd1ee6798a2033..05260d25fe469f8e28bfba0807a874c4e2173acd 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
        ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
        ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
 
-       ct_req->req.rff_id.fc4_feature = BIT_1;
+       qlt_rff_id(vha, ct_req);
+
        ct_req->req.rff_id.fc4_type = 0x08;             /* SCSI - FCP */
 
        /* Execute MS IOCB */
index b9465643396b0c40e7fb7fbab19249f5c5cbded6..ca5084743135cf358c397dfccaa84fbbbbfece95 100644 (file)
@@ -17,6 +17,9 @@
 #include <asm/prom.h>
 #endif
 
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
 /*
 *  QLogic ISP2x00 Hardware Support Function Prototypes.
 */
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
                        return QLA_FUNCTION_FAILED;
                }
        }
-       rval = qla2x00_init_rings(vha);
+
+       if (qla_ini_mode_enabled(vha))
+               rval = qla2x00_init_rings(vha);
+
        ha->flags.chip_reset_done = 1;
 
        if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                        mq_size += ha->max_rsp_queues *
                            (rsp->length * sizeof(response_t));
                }
+               if (ha->tgt.atio_q_length)
+                       mq_size += ha->tgt.atio_q_length * sizeof(request_t);
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
                        goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
        icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
        icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
+       /* Setup ATIO queue dma pointers for target mode */
+       icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+       icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+       icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+       icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
        if (ha->mqenable || IS_QLA83XX(ha)) {
                icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
                icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
                WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
                WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
        }
+       qlt_24xx_config_rings(vha, reg);
+
        /* PCI posting */
        RD_REG_DWORD(&ioreg->hccr);
 }
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 
        spin_unlock(&ha->vport_slock);
 
+       ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+       ha->tgt.atio_ring_index = 0;
+       /* Initialize ATIO queue entries */
+       qlt_init_atio_q_entries(vha);
+
        ha->isp_ops->config_rings(vha);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        vha->d_id.b.area = area;
        vha->d_id.b.al_pa = al_pa;
 
+       spin_lock(&ha->vport_slock);
+       qlt_update_vp_map(vha, SET_AL_PA);
+       spin_unlock(&ha->vport_slock);
+
        if (!vha->flags.init_done)
                ql_log(ql_log_info, vha, 0x2010,
                    "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
            nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x0064,
-                   "Inconisistent NVRAM "
+                   "Inconsistent NVRAM "
                    "detected: checksum=0x%x id=%c version=0x%x.\n",
                    chksum, nv->id[0], nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
        if (IS_QLA23XX(ha)) {
                nv->firmware_options[0] |= BIT_2;
                nv->firmware_options[0] &= ~BIT_3;
-               nv->firmware_options[0] &= ~BIT_6;
+               nv->special_options[0] &= ~BIT_6;
                nv->add_firmware_options[1] |= BIT_5 | BIT_4;
 
                if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
+       scsi_qla_host_t *vha = fcport->vha;
        unsigned long flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-       if (rport)
+       if (rport) {
                fc_remote_port_delete(rport);
+               /*
+                * Release the target mode FC NEXUS in qla_target.c code
+                * if target mod is enabled.
+                * if target mode is enabled.
+               qlt_fc_port_deleted(vha, fcport);
+       }
 }
 
 /**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 
        /* Setup fcport template structure. */
        fcport->vha = vha;
-       fcport->vp_idx = vha->vp_idx;
        fcport->port_type = FCT_UNKNOWN;
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
+       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
        return fcport;
 }
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                new_fcport->d_id.b.area = area;
                new_fcport->d_id.b.al_pa = al_pa;
                new_fcport->loop_id = loop_id;
-               new_fcport->vp_idx = vha->vp_idx;
                rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
                if (rval2 != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 
                if (!found) {
                        /* New device, add to fcports list. */
-                       if (vha->vp_idx) {
-                               new_fcport->vha = vha;
-                               new_fcport->vp_idx = vha->vp_idx;
-                       }
                        list_add_tail(&new_fcport->list, &vha->vp_fcports);
 
                        /* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
 static void
 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-#define LS_UNKNOWN      2
-       static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
        char *link_speed;
        int rval;
        uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                    fcport->port_name[6], fcport->port_name[7], rval,
                    fcport->fp_speed, mb[0], mb[1]);
        } else {
-               link_speed = link_speeds[LS_UNKNOWN];
-               if (fcport->fp_speed < 5)
-                       link_speed = link_speeds[fcport->fp_speed];
-               else if (fcport->fp_speed == 0x13)
-                       link_speed = link_speeds[5];
+               link_speed = qla2x00_get_link_speed_str(ha);
                ql_dbg(ql_dbg_disc, vha, 0x2005,
                    "iIDMA adjusted to %s GB/s "
                    "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
                    "Unable to allocate fc remote port.\n");
                return;
        }
+       /*
+        * Create target mode FC NEXUS in qla_target.c if target mode is
+        * enabled.
+        */
+       qlt_fc_port_added(vha, fcport);
+
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        *((fc_port_t **)rport->dd_data) = fcport;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
        int     rval;
-       fc_port_t       *fcport, *fcptemp;
+       fc_port_t       *fcport;
        uint16_t        next_loopid;
        uint16_t        mb[MAILBOX_REGISTER_COUNT];
        uint16_t        loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                    0xfc, mb, BIT_1|BIT_0);
                if (rval != QLA_SUCCESS) {
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       return rval;
+                       break;
                }
                if (mb[0] != MBS_COMMAND_COMPLETE) {
                        ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        }
                }
 
-#define QLA_FCPORT_SCAN                1
-#define QLA_FCPORT_FOUND       2
-
-               list_for_each_entry(fcport, &vha->vp_fcports, list) {
-                       fcport->scan_state = QLA_FCPORT_SCAN;
-               }
-
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
 
-               /*
-                * Logout all previous fabric devices marked lost, except
-                * FCP2 devices.
-                */
+               /* Add new ports to existing port list */
+               list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+               /* Starting free loop ID. */
+               next_loopid = ha->min_external_loopid;
+
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
+                       /* Logout lost/gone fabric devices (non-FCP2) */
+                       if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
                                    ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                                            fcport->d_id.b.domain,
                                            fcport->d_id.b.area,
                                            fcport->d_id.b.al_pa);
-                                       fcport->loop_id = FC_NO_LOOP_ID;
                                }
-                       }
-               }
-
-               /* Starting free loop ID. */
-               next_loopid = ha->min_external_loopid;
-
-               /*
-                * Scan through our port list and login entries that need to be
-                * logged in.
-                */
-               list_for_each_entry(fcport, &vha->vp_fcports, list) {
-                       if (atomic_read(&vha->loop_down_timer) ||
-                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-                               break;
-
-                       if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
-                           (fcport->flags & FCF_LOGIN_NEEDED) == 0)
                                continue;
-
-                       if (fcport->loop_id == FC_NO_LOOP_ID) {
-                               fcport->loop_id = next_loopid;
-                               rval = qla2x00_find_new_loop_id(
-                                   base_vha, fcport);
-                               if (rval != QLA_SUCCESS) {
-                                       /* Ran out of IDs to use */
-                                       break;
-                               }
                        }
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-               }
-
-               /* Exit if out of loop IDs. */
-               if (rval != QLA_SUCCESS) {
-                       break;
-               }
-
-               /*
-                * Login and add the new devices to our port list.
-                */
-               list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-                       if (atomic_read(&vha->loop_down_timer) ||
-                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-                               break;
-
-                       /* Find a new loop ID to use. */
-                       fcport->loop_id = next_loopid;
-                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
-                       if (rval != QLA_SUCCESS) {
-                               /* Ran out of IDs to use */
-                               break;
+                       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
+
+                       /* Login fabric devices that need a login */
+                       if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+                           atomic_read(&vha->loop_down_timer) == 0) {
+                               if (fcport->loop_id == FC_NO_LOOP_ID) {
+                                       fcport->loop_id = next_loopid;
+                                       rval = qla2x00_find_new_loop_id(
+                                           base_vha, fcport);
+                                       if (rval != QLA_SUCCESS) {
+                                               /* Ran out of IDs to use */
+                                               continue;
+                                       }
+                               }
                        }
 
                        /* Login and update database */
                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
-                       if (vha->vp_idx) {
-                               fcport->vha = vha;
-                               fcport->vp_idx = vha->vp_idx;
-                       }
-                       list_move_tail(&fcport->list, &vha->vp_fcports);
                }
        } while (0);
 
-       /* Free all new device structures not processed. */
-       list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-               list_del(&fcport->list);
-               kfree(fcport);
-       }
-
        if (rval) {
                ql_dbg(ql_dbg_disc, vha, 0x2068,
                    "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                            WWN_SIZE))
                                continue;
 
-                       fcport->scan_state = QLA_FCPORT_FOUND;
+                       fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
 
                        found++;
 
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
                        if (mb[10] & BIT_1)
                                fcport->supported_classes |= FC_COS_CLASS3;
 
+                       if (IS_FWI2_CAPABLE(ha)) {
+                               if (mb[10] & BIT_7)
+                                       fcport->flags |=
+                                           FCF_CONF_COMP_SUPPORTED;
+                       }
+
                        rval = QLA_SUCCESS;
                        break;
                } else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
                vha->flags.online = 0;
        ha->flags.chip_reset_done = 0;
        clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-       ha->qla_stats.total_isp_aborts++;
+       vha->qla_stats.total_isp_aborts++;
 
        ql_log(ql_log_info, vha, 0x00af,
            "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
+       unsigned long flags;
 
        /* If firmware needs to be loaded */
        if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
                        vha->flags.online = 1;
+
+                       /*
+                        * Process any ATIO queue entries that came in
+                        * while we weren't online.
+                        */
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       if (qla_tgt_mode_enabled(vha))
+                               qlt_24xx_process_atio_queue(vha);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
                        wait_time = 256;
                        do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x006b,
-                   "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
                    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x006c,
                    "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                rval = 1;
        }
 
+       if (!qla_ini_mode_enabled(vha)) {
+               /* Don't enable full login after initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Don't enable LIP full login for initiator */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+       }
+
+       qlt_24xx_config_nvram_stage1(vha, nv);
+
        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);
 
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
            "QLA2462");
 
-       /* Use alternate WWN? */
+       qlt_24xx_config_nvram_stage2(vha, icb);
+
        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+               /* Use alternate WWN? */
                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
        }
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
                /* Reset NVRAM data. */
                ql_log(ql_log_info, vha, 0x0073,
-                   "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
                    "version=0x%x.\n", chksum, nv->id[0],
                    le16_to_cpu(nv->nvram_version));
                ql_log(ql_log_info, vha, 0x0074,
index eac9509244971ba9e324803ca531014e3f917dad..70dbf53d9e0f4fe0762073cb0bfec1508251fc8f 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
 {
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct scsi_qla_host *vha = sp->fcport->vha;
 
        cflags = 0;
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
-               sp->fcport->vha->hw->qla_stats.output_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
-               sp->fcport->vha->hw->qla_stats.input_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
        return (cflags);
 }
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
+               /* If still no head room then bail out */
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
 
        /* Build command packet */
        req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
 /**
  * qla2x00_start_iocbs() - Execute the IOCB command
  */
-static void
+void
 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
        struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        return (ret);
 }
 
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reacquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+       if (ha_locked) {
+               if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+                                       MK_SYNC_ALL) != QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+       } else {
+               if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+                                       MK_SYNC_ALL) != QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+       }
+       vha->marker_needed = 0;
+
+       return QLA_SUCCESS;
+}
+
 /**
  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  * Continuation Type 1 IOCBs to allocate.
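
The new qla2x00_issue_marker() above simply dispatches to the locked (__qla2x00_marker) or lock-taking (qla2x00_marker) path based on the ha_locked flag. The same "caller may already hold the lock" pattern, reduced to a standalone sketch with a plain mutex standing in for the hardware spinlock (all names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    static int __do_work_locked(void)          /* caller already holds hw_lock */
    {
            return 0;                          /* pretend the work succeeded */
    }

    static int do_work(void)                   /* takes and releases hw_lock itself */
    {
            pthread_mutex_lock(&hw_lock);
            int ret = __do_work_locked();
            pthread_mutex_unlock(&hw_lock);
            return ret;
    }

    /* Wrapper: pick the right variant depending on the caller's lock state. */
    static int issue_work(int lock_held)
    {
            return lock_held ? __do_work_locked() : do_work();
    }

    int main(void)
    {
            printf("%d\n", issue_work(0));
            return 0;
    }
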
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
-               ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
-               ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
        cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
-               sp->fcport->vha->hw->qla_stats.output_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
-               sp->fcport->vha->hw->qla_stats.input_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
        /* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                return QLA_SUCCESS;
        }
 
-       cmd_pkt->vp_index = sp->fcport->vp_idx;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
 
        /* Build command packet. */
        req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-       cmd_pkt->vp_index = sp->fcport->vp_idx;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
 
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
-
        status |= QDSS_GOT_Q_SPACE;
 
        /* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
        /* Implicit: mbx->mbx10 = 0. */
 }
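
Throughout these IOCB builders the patch stops using the cached fcport->vp_idx and instead reads the index through the port's owning host, sp->fcport->vha->vp_idx. A tiny sketch of that derivation (hypothetical types, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* The VP index is read through the port's back-pointer to its host,
     * so it cannot go stale relative to the host the port belongs to. */
    struct host { uint16_t vp_idx; };
    struct port { struct host *vha; };

    static uint16_t port_vp_index(const struct port *p)
    {
            return p->vha->vp_idx;
    }

    int main(void)
    {
            struct host h = { .vp_idx = 3 };
            struct port p = { .vha = &h };
            printf("%u\n", port_vp_index(&p));
            return 0;
    }
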
 
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
        tsk->port_id[0] = fcport->d_id.b.al_pa;
        tsk->port_id[1] = fcport->d_id.b.area;
        tsk->port_id[2] = fcport->d_id.b.domain;
-       tsk->vp_index = fcport->vp_idx;
+       tsk->vp_index = fcport->vha->vp_idx;
 
        if (flags == TCF_LUN_RESET) {
                int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
         els_iocb->handle = sp->handle;
         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
-        els_iocb->vp_index = sp->fcport->vp_idx;
+       els_iocb->vp_index = sp->fcport->vha->vp_idx;
         els_iocb->sof_type = EST_SOFI3;
         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
         ct_iocb->handle = sp->handle;
 
        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-       ct_iocb->vp_index = sp->fcport->vp_idx;
+       ct_iocb->vp_index = sp->fcport->vha->vp_idx;
         ct_iocb->comp_status = __constant_cpu_to_le16(0);
 
        ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
                        else
                                req->cnt = req->length -
                                        (req->ring_index - cnt);
+                       if (req->cnt < (req_cnt + 2))
+                               goto queuing_error;
                }
 
-               if (req->cnt < (req_cnt + 2))
-                       goto queuing_error;
-
                ctx = sp->u.scmd.ctx =
                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
                if (!ctx->fcp_cmnd) {
                        ql_log(ql_log_fatal, vha, 0x3011,
                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-                       goto queuing_error_fcp_cmnd;
+                       goto queuing_error;
                }
 
                /* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
+               cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
                /* Build IOCB segments */
                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
+               cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
index ce42288049b5a20a5c57646ecf077886a1239ebb..6f67a9d4998b6d43fbab53a0a75918c6f607fbf0 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
                    "IDC failed to post ACK.\n");
 }
 
+#define LS_UNKNOWN     2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+       static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+       char *link_speed;
+       int fw_speed = ha->link_data_rate;
+
+       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+               link_speed = link_speeds[0];
+       else if (fw_speed == 0x13)
+               link_speed = link_speeds[6];
+       else {
+               link_speed = link_speeds[LS_UNKNOWN];
+               if (fw_speed < 6)
+                       link_speed =
+                           link_speeds[fw_speed];
+       }
+
+       return link_speed;
+}
+
 /**
  * qla2x00_async_event() - Process asynchronous events.
  * @ha: SCSI driver HA context
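
qla2x00_get_link_speed_str(), added above, centralizes the speed-code lookup that the MBA_LOOP_UP handler used to open-code: 1-Gbps-only HBAs always report "1", firmware code 0x13 means 10 Gbps, codes below 6 index the table directly, and anything else is "?". A standalone sketch of the same mapping (the parameterization here is illustrative):

    #include <stdio.h>

    static const char *link_speed_str(int fw_speed, int is_1g_only_hba)
    {
            static const char *speeds[] = { "1", "2", "?", "4", "8", "16", "10" };

            if (is_1g_only_hba)
                    return speeds[0];          /* fixed 1 Gbps parts */
            if (fw_speed == 0x13)
                    return speeds[6];          /* firmware code 0x13 -> 10 Gbps */
            if (fw_speed >= 0 && fw_speed < 6)
                    return speeds[fw_speed];
            return speeds[2];                  /* unknown */
    }

    int main(void)
    {
            printf("%s %s %s\n", link_speed_str(4, 0),
                link_speed_str(0x13, 0), link_speed_str(9, 0));
            return 0;
    }
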
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 void
 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
-#define LS_UNKNOWN     2
-       static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
-       char            *link_speed;
        uint16_t        handle_cnt;
        uint16_t        cnt, mbx;
        uint32_t        handles[5];
@@ -454,8 +474,8 @@ skip_rio:
        case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
                ql_dbg(ql_dbg_async, vha, 0x5008,
                    "Asynchronous WAKEUP_THRES.\n");
-               break;
 
+               break;
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
                break;
 
        case MBA_LOOP_UP:               /* Loop Up Event */
-               if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-                       link_speed = link_speeds[0];
+               if (IS_QLA2100(ha) || IS_QLA2200(ha))
                        ha->link_data_rate = PORT_SPEED_1GB;
-               } else {
-                       link_speed = link_speeds[LS_UNKNOWN];
-                       if (mb[1] < 6)
-                               link_speed = link_speeds[mb[1]];
-                       else if (mb[1] == 0x13)
-                               link_speed = link_speeds[6];
+               else
                        ha->link_data_rate = mb[1];
-               }
 
                ql_dbg(ql_dbg_async, vha, 0x500a,
-                   "LOOP UP detected (%s Gbps).\n", link_speed);
+                   "LOOP UP detected (%s Gbps).\n",
+                   qla2x00_get_link_speed_str(ha));
 
                vha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
                        ql_dbg(ql_dbg_async, vha, 0x5010,
                            "Port unavailable %04x %04x %04x.\n",
                            mb[1], mb[2], mb[3]);
+                       ql_log(ql_log_warn, vha, 0x505e,
+                           "Link is offline.\n");
 
                        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                                atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
                        ql_dbg(ql_dbg_async, vha, 0x5011,
                            "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                            mb[1], mb[2], mb[3]);
+
+                       qlt_async_event(mb[0], vha, mb);
                        break;
                }
 
                ql_dbg(ql_dbg_async, vha, 0x5012,
                    "Port database changed %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
+               ql_log(ql_log_warn, vha, 0x505f,
+                   "Link is operational (%s Gbps).\n",
+                   qla2x00_get_link_speed_str(ha));
 
                /*
                 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
 
                qla2x00_mark_all_devices_lost(vha, 1);
 
+               if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+                       set_bit(SCR_PENDING, &vha->dpc_flags);
+
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+               qlt_async_event(mb[0], vha, mb);
                break;
 
        case MBA_RSCN_UPDATE:           /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
                    mb[0], mb[1], mb[2], mb[3]);
        }
 
+       qlt_async_event(mb[0], vha, mb);
+
        if (!vha->vp_idx && ha->num_vhosts)
                qla2x00_alert_all_vps(rsp, mb);
 }
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                } else if (iop[0] & BIT_5)
                        fcport->port_type = FCT_INITIATOR;
 
+               if (iop[0] & BIT_7)
+                       fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
                if (logio->io_parameter[7] || logio->io_parameter[8])
                        fcport->supported_classes |= FC_COS_CLASS2;
                if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 
                if (pkt->entry_status != 0) {
                        qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+                       (void)qlt_24xx_process_response_error(vha, pkt);
+
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
                        continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                 case ELS_IOCB_TYPE:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
+               case ABTS_RECV_24XX:
+                       /* ensure that the ATIO queue is empty */
+                       qlt_24xx_process_atio_queue(vha);
+               case ABTS_RESP_24XX:
+               case CTIO_TYPE7:
+               case NOTIFY_ACK_TYPE:
+                       qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+                       break;
                case MARKER_TYPE:
                        /* Do nothing in this case, this check is to prevent it
                         * from falling into default case
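
Note the deliberate fall-through introduced above: ABTS_RECV_24XX first drains the ATIO queue and then drops into the shared target-packet handling alongside ABTS_RESP_24XX, CTIO_TYPE7 and NOTIFY_ACK_TYPE. A reduced sketch of that switch shape (constants and helpers are made up for illustration):

    #include <stdio.h>

    enum pkt_type { PKT_ABTS_RECV, PKT_ABTS_RESP, PKT_CTIO, PKT_NOTIFY_ACK, PKT_OTHER };

    static void drain_atio_queue(void)  { puts("drain ATIO queue"); }
    static void handle_target_pkt(void) { puts("handle target packet"); }

    static void dispatch(enum pkt_type t)
    {
            switch (t) {
            case PKT_ABTS_RECV:
                    drain_atio_queue();
                    /* fall through: the packet itself is still target traffic */
            case PKT_ABTS_RESP:
            case PKT_CTIO:
            case PKT_NOTIFY_ACK:
                    handle_target_pkt();
                    break;
            default:
                    puts("other");
                    break;
            }
    }

    int main(void)
    {
            dispatch(PKT_ABTS_RECV);
            return 0;
    }
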
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
                case 0x14:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               case 0x1C: /* ATIO queue updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       break;
+               case 0x1D: /* ATIO and response queues updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       qla24xx_process_response_queue(vha, rsp);
+                       break;
                default:
                        ql_dbg(ql_dbg_async, vha, 0x504f,
                            "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
                case 0x14:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               case 0x1C: /* ATIO queue updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       break;
+               case 0x1D: /* ATIO and response queues updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       qla24xx_process_response_queue(vha, rsp);
+                       break;
                default:
                        ql_dbg(ql_dbg_async, vha, 0x5051,
                            "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
 qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct rsp_que *rsp = ha->rsp_q_map[0];
+       struct rsp_que *rsp;
+
+       /*
+        * We need to check that ha->rsp_q_map is valid in case we are called
+        * from a probe failure context.
+        */
+       if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+               return;
+       rsp = ha->rsp_q_map[0];
 
        if (ha->flags.msix_enabled)
                qla24xx_disable_msix(ha);
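
The guard added to qla2x00_free_irqs() keeps teardown from dereferencing ha->rsp_q_map when the function is reached from a probe-failure path where the queue map was never allocated. The defensive shape, as a standalone sketch:

    #include <stdio.h>

    struct rsp_queue { int id; };

    /* Teardown helper that may be reached before setup ever finished. */
    static void free_irqs(struct rsp_queue **rsp_q_map)
    {
            /* Bail out early if the map (or its first entry) was never set up. */
            if (!rsp_q_map || !rsp_q_map[0])
                    return;
            printf("freeing irq for rsp queue %d\n", rsp_q_map[0]->id);
    }

    int main(void)
    {
            free_irqs(NULL);                         /* probe-failure path: no-op */

            struct rsp_queue q = { .id = 0 };
            struct rsp_queue *map[1] = { &q };
            free_irqs(map);                          /* normal teardown path */
            return 0;
    }
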
index b4a23394a7bd8f9a225ba2acd77ee19fb018f691..d5ce92c0a8fcef8e246ef25599e9d635a9a5b454 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        ictrl = RD_REG_WORD(&reg->isp.ictrl);
                }
                ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-                   "MBX Command timeout for cmd %x.\n", command);
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
-                   "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
-                   "mb[0] = 0x%x.\n", mb0);
+                   "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                   "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
                ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
                /*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                            CRB_NIU_XG_PAUSE_CTL_P1);
                                }
                                ql_log(ql_log_info, base_vha, 0x101c,
-                                   "Mailbox cmd timeout occured, cmd=0x%x, "
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
                                    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
                                    "abort.\n", command, mcp->mb[0],
                                    ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                            CRB_NIU_XG_PAUSE_CTL_P1);
                                }
                                ql_log(ql_log_info, base_vha, 0x101e,
-                                   "Mailbox cmd timeout occured, cmd=0x%x, "
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
                                    "mb[0]=0x%x. Scheduling ISP abort ",
                                    command, mcp->mb[0]);
                                set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+           "Entered %s.\n", __func__);
 
        if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
                mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
                ql_dbg(ql_dbg_mbx, vha, 0x1023,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
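
The bulk of the remaining hunks in this file demote the routine "Entered %s" / "Done %s" trace lines from ql_dbg_mbx to ql_dbg_mbx + ql_dbg_verbose, so they are emitted only when the verbose bit is enabled in addition to the mailbox bit. A small sketch of mask-composed debug levels (the bit values here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define DBG_MBX      0x01u
    #define DBG_VERBOSE  0x80u

    static uint32_t enabled_mask = DBG_MBX;           /* verbose bit off by default */

    static void dbg(uint32_t level, const char *msg)
    {
            /* Print only if every bit requested by the call site is enabled. */
            if ((enabled_mask & level) == level)
                    printf("%s\n", msg);
    }

    int main(void)
    {
            dbg(DBG_MBX, "Failed=...");               /* still printed */
            dbg(DBG_MBX | DBG_VERBOSE, "Entered f");  /* suppressed until verbose is on */
            return 0;
    }
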
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
        mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1027,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
                            "Done exchanges=%x.\n", mcp->mb[1]);
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+                           "Done %s.\n", __func__);
                }
        }
 
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
        mbx_cmd_t       *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
        mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                        ha->fw_attributes_h = mcp->mb[15];
                        ha->fw_attributes_ext[0] = mcp->mb[16];
                        ha->fw_attributes_ext[1] = mcp->mb[17];
-                       ql_dbg(ql_dbg_mbx, vha, 0x1139,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
                            "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
                            __func__, mcp->mb[15], mcp->mb[6]);
                } else
-                       ql_dbg(ql_dbg_mbx, vha, 0x112f,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
                            "%s: FwAttributes [Upper]  invalid, MB6:%04x\n",
                            __func__, mcp->mb[6]);
        }
@@ -576,7 +579,8 @@ failed:
                ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+                   "Done %s.\n", __func__);
        }
        return rval;
 }
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
        mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
                fwopts[2] = mcp->mb[2];
                fwopts[3] = mcp->mb[3];
 
-               ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
        mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
                    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
        mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_VERIFY_CHECKSUM;
        mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
                    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
                    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_IOCB_COMMAND_A64;
        mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
                /* Mask reserved bits. */
                sts_entry->entry_status &=
                    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
-               ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
        struct req_que *req = vha->req;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+           "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
        l = l;
        vha = fcport->vha;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+           "Entered %s.\n", __func__);
 
        req = vha->hw->req_q_map[0];
        rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+                   "Failed=%x.\n", rval);
        }
 
        /* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
                ql_dbg(ql_dbg_mbx, vha, 0x1040,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
 
        vha = fcport->vha;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+           "Entered %s.\n", __func__);
 
        req = vha->hw->req_q_map[0];
        rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
                ql_dbg(ql_dbg_mbx, vha, 0x1044,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
        mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+                   "Done %s.\n", __func__);
 
                if (IS_CNA_CAPABLE(vha->hw)) {
                        vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_RETRY_COUNT;
        mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
                        *tov = ratov;
                }
 
-               ql_dbg(ql_dbg_mbx, vha, 0x104b,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
                    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
        }
 
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+           "Entered %s.\n", __func__);
 
        if (IS_QLA82XX(ha) && ql2xdbwr)
                qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ *      Issue get node name list mailbox command, kmalloc()
+ *      and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ *      vha = adapter state pointer.
+ *      out_data = resulting list
+ *      out_len = length of the resulting list
+ *
+ * Returns:
+ *      qla2x00 local function return status code.
+ *
+ * Context:
+ *      Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_port_24xx_data *list = NULL;
+       void *pmap;
+       mbx_cmd_t mc;
+       dma_addr_t pmap_dma;
+       ulong dma_size;
+       int rval, left;
+
+       left = 1;
+       while (left > 0) {
+               dma_size = left * sizeof(*list);
+               pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+                                        &pmap_dma, GFP_KERNEL);
+               if (!pmap) {
+                       ql_log(ql_log_warn, vha, 0x113f,
+                           "%s(%ld): DMA Alloc failed of %ld\n",
+                           __func__, vha->host_no, dma_size);
+                       rval = QLA_MEMORY_ALLOC_FAILED;
+                       goto out;
+               }
+
+               mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+               mc.mb[1] = BIT_1 | BIT_3;
+               mc.mb[2] = MSW(pmap_dma);
+               mc.mb[3] = LSW(pmap_dma);
+               mc.mb[6] = MSW(MSD(pmap_dma));
+               mc.mb[7] = LSW(MSD(pmap_dma));
+               mc.mb[8] = dma_size;
+               mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+               mc.in_mb = MBX_0|MBX_1;
+               mc.tov = 30;
+               mc.flags = MBX_DMA_IN;
+
+               rval = qla2x00_mailbox_command(vha, &mc);
+               if (rval != QLA_SUCCESS) {
+                       if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+                           (mc.mb[1] == 0xA)) {
+                               left += le16_to_cpu(mc.mb[2]) /
+                                   sizeof(struct qla_port_24xx_data);
+                               goto restart;
+                       }
+                       goto out_free;
+               }
+
+               left = 0;
+
+               list = kzalloc(dma_size, GFP_KERNEL);
+               if (!list) {
+                       ql_log(ql_log_warn, vha, 0x1140,
+                           "%s(%ld): failed to allocate node names list "
+                           "structure.\n", __func__, vha->host_no);
+                       rval = QLA_MEMORY_ALLOC_FAILED;
+                       goto out_free;
+               }
+
+               memcpy(list, pmap, dma_size);
+restart:
+               dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
        }
 
+       *out_data = list;
+       *out_len = dma_size;
+
+out:
+       return rval;
+
+out_free:
+       dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
        return rval;
 }
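
qla2x00_get_node_name_list(), added above, allocates a DMA buffer for MBC_PORT_NODE_NAME_LIST, and when the firmware rejects the command (MBS_COMMAND_ERROR with mb[1] == 0xA) it enlarges the request by the additional entry count derived from mb[2] and retries; on success the data is copied into a kmalloc'd list that the caller must kfree(). The grow-and-retry shape, reduced to a user-space sketch with a mocked command (all names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mocked firmware call: fails and reports the missing element count
     * until the buffer can hold at least `needed` entries. */
    static int mock_cmd(size_t have, size_t needed, size_t *extra)
    {
            if (have >= needed)
                    return 0;
            *extra = needed - have;
            return -1;
    }

    static int get_list(size_t entry_sz, size_t needed, void **out, size_t *out_len)
    {
            size_t left = 1, extra;
            void *buf;

            while (left > 0) {
                    size_t size = left * entry_sz;
                    buf = malloc(size);
                    if (!buf)
                            return -1;
                    if (mock_cmd(left, needed, &extra)) {
                            left += extra;          /* firmware told us how much more */
                            free(buf);              /* retry with a bigger buffer */
                            continue;
                    }
                    memset(buf, 0, size);           /* stand-in for the DMA'd payload */
                    *out = buf;                     /* caller owns and frees the result */
                    *out_len = size;
                    left = 0;
            }
            return 0;
    }

    int main(void)
    {
            void *list;
            size_t len;

            if (get_list(64, 5, &list, &len) == 0) {
                    printf("got %zu bytes\n", len);
                    free(list);
            }
            return 0;
    }
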
 
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
        dma_addr_t pd_dma;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+           "Entered %s.\n", __func__);
 
        pd24 = NULL;
        pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
                        fcport->port_type = FCT_INITIATOR;
                else
                        fcport->port_type = FCT_TARGET;
+
+               /* Passback COS information. */
+               fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+                               FC_COS_CLASS2 : FC_COS_CLASS3;
+
+               if (pd24->prli_svc_param_word_3[0] & BIT_7)
+                       fcport->flags |= FCF_CONF_COMP_SUPPORTED;
        } else {
                uint64_t zero = 0;
 
@@ -1378,7 +1502,8 @@ gpd_error_out:
                    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
                    mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
        mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
                ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_PORT_NAME;
        mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
                        name[7] = LSB(mcp->mb[7]);
                }
 
-               ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+           "Entered %s.\n", __func__);
 
        if (IS_CNA_CAPABLE(vha->hw)) {
                /* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+           "Entered %s.\n", __func__);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105e,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
            "Retry cnt=%d ratov=%d total tov=%d.\n",
            vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
 
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        struct req_que *req;
        struct rsp_que *rsp;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+           "Entered %s.\n", __func__);
 
        if (ha->flags.cpu_affinity_enabled)
                req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                        break;
                }
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+                   "Done %s.\n", __func__);
 
                iop[0] = le32_to_cpu(lg->io_parameter[0]);
 
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                        mb[10] |= BIT_0;        /* Class 2. */
                if (lg->io_parameter[9] || lg->io_parameter[10])
                        mb[10] |= BIT_1;        /* Class 3. */
+               if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+                       mb[10] |= BIT_7;        /* Confirmed Completion
+                                                * Allowed
+                                                */
        }
 
        dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
        mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+           "Entered %s.\n", __func__);
 
        if (IS_FWI2_CAPABLE(ha))
                return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+                   "Done %s.\n", __func__);
        }
 
        return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        struct req_que *req;
        struct rsp_que *rsp;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+           "Entered %s.\n", __func__);
 
        lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
        if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    le32_to_cpu(lg->io_parameter[1]));
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
        mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LIP_FULL_LOGIN;
        mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+           "Entered %s.\n", __func__);
 
        if (id_list == NULL)
                return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
                ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
        } else {
                *entries = mcp->mb[1];
-               ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
        mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
                ql_dbg(ql_dbg_mbx, vha, 0x107d,
                    "Failed mb[0]=%x.\n", mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x107e,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
                    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
                    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
                    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        dma_addr_t pmap_dma;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+           "Entered %s.\n", __func__);
 
        pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
        if (pmap  == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        rval = qla2x00_mailbox_command(vha, mcp);
 
        if (rval == QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x1081,
+               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
                    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
                    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
                ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
        uint32_t *siter, *diter, dwords;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_LINK_STATUS;
        mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
                        rval = QLA_FUNCTION_FAILED;
                } else {
                        /* Copy over data -- firmware data is LE. */
-                       ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+                           "Done %s.\n", __func__);
                        dwords = offsetof(struct link_statistics, unused1) / 4;
                        siter = diter = &stats->link_fail_cnt;
                        while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
        mbx_cmd_t *mcp = &mc;
        uint32_t *siter, *diter, dwords;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
        mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
                            "Failed mb[0]=%x.\n", mcp->mb[0]);
                        rval = QLA_FUNCTION_FAILED;
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+                           "Done %s.\n", __func__);
                        /* Copy over data -- firmware data is LE. */
                        dwords = sizeof(struct link_statistics) / 4;
                        siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = vha->req;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+           "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
        abt->port_id[0] = fcport->d_id.b.al_pa;
        abt->port_id[1] = fcport->d_id.b.area;
        abt->port_id[2] = fcport->d_id.b.domain;
-       abt->vp_index = fcport->vp_idx;
+       abt->vp_index = fcport->vha->vp_idx;
 
        abt->req_que_no = cpu_to_le16(req->id);
 
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
                    le16_to_cpu(abt->nport_handle));
                rval = QLA_FUNCTION_FAILED;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        ha = vha->hw;
        req = vha->req;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+           "Entered %s.\n", __func__);
 
        if (ha->flags.cpu_affinity_enabled)
                rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
        tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
        tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
-       tsk->p.tsk.vp_index = fcport->vp_idx;
+       tsk->p.tsk.vp_index = fcport->vha->vp_idx;
        if (type == TCF_LUN_RESET) {
                int_to_scsilun(l, &tsk->p.tsk.lun);
                host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        } else if (le16_to_cpu(sts->scsi_status) &
            SS_RESPONSE_INFO_LEN_VALID) {
                if (le32_to_cpu(sts->rsp_data_len) < 4) {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1097,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
                            "Ignoring inconsistent data length -- not enough "
                            "response info (%d).\n",
                            le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
                ql_dbg(ql_dbg_mbx, vha, 0x1099,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
        if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
        mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SERDES_PARAMS;
        mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_STOP_FIRMWARE;
        mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
                if (mcp->mb[0] == MBS_INVALID_COMMAND)
                        rval = QLA_INVALID_COMMAND;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
            !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+                   "Done %s.\n", __func__);
 
                if (mb)
                        memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+                   "Done %s.\n", __func__);
 
                if (wr)
                        *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+           "Entered %s.\n", __func__);
 
        if (!IS_IIDMA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+                   "Done %s.\n", __func__);
                if (port_speed)
                        *port_speed = mcp->mb[3];
        }
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+           "Entered %s.\n", __func__);
 
        if (!IS_IIDMA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        }
 
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+                   "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        scsi_qla_host_t *vp;
        unsigned long   flags;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+           "Entered %s.\n", __func__);
 
        if (rptid_entry->entry_status != 0)
                return;
 
        if (rptid_entry->format == 0) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
                    "Format 0 : Number of VPs setup %d, number of "
                    "VPs acquired %d.\n",
                    MSB(le16_to_cpu(rptid_entry->vp_count)),
                    LSB(le16_to_cpu(rptid_entry->vp_count)));
-               ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
                    "Primary port id %02x%02x%02x.\n",
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]);
        } else if (rptid_entry->format == 1) {
                vp_idx = LSB(stat);
-               ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
                    "Format 1: VP[%d] enabled - status %d - with "
                    "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 
        /* This can be called by the parent */
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+           "Entered %s.\n", __func__);
 
        vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
        if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
        vpmod->vp_count = 1;
        vpmod->vp_index1 = vha->vp_idx;
        vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+       qlt_modify_vp_config(vha, vpmod);
+
        memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
        memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
        vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
                rval = QLA_FUNCTION_FAILED;
        } else {
                /* EMPTY */
-               ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+                   "Done %s.\n", __func__);
                fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
        }
        dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
        int     vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
            "Entered %s enabling index %d.\n", __func__, vp_index);
 
        if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
                    le16_to_cpu(vce->comp_status));
                rval = QLA_FUNCTION_FAILED;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
-       /*
-        * This command is implicitly executed by firmware during login for the
-        * physical hosts
-        */
-       if (vp_idx == 0)
-               return QLA_FUNCTION_FAILED;
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
        mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+           "Entered %s.\n", __func__);
 
        if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
                mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
                ql_dbg(ql_dbg_mbx, vha, 0x1008,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
        unsigned long flags;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+           "Entered %s.\n", __func__);
 
        mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                status[0] = le16_to_cpu(mn->p.rsp.comp_status);
                status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
                    le16_to_cpu(mn->p.rsp.failure_code) : 0;
-               ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
                    "cs=%x fc=%x.\n", status[0], status[1]);
 
                if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                                retry = 1;
                        }
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
                            "Firmware updated to %x.\n",
                            le32_to_cpu(mn->p.rsp.fw_ver));
 
@@ -3316,9 +3501,11 @@ verify_done:
        dma_pool_free(ha->s_dma_pool, mn, mn_dma);
 
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+                   "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        struct device_reg_25xxmq __iomem *reg;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
                ql_dbg(ql_dbg_mbx, vha, 0x10d4,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        struct device_reg_25xxmq __iomem *reg;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
                ql_dbg(ql_dbg_mbx, vha, 0x10d7,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_IDC_ACK;
        memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x10da,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+                   "Done %s.\n", __func__);
                *sector_size = mcp->mb[1];
        }
 
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
        mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
        mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_RESTART_MPI_FW;
        mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
                ql_dbg(ql_dbg_mbx, vha, 0x10e9,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
                ql_dbg(ql_dbg_mbx, vha, 0x10ec,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+           "Entered %s.\n", __func__);
 
        if (!IS_CNA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+                   "Done %s.\n", __func__);
 
 
                *actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+           "Entered %s.\n", __func__);
 
        if (!IS_CNA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
                ql_dbg(ql_dbg_mbx, vha, 0x10f5,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+                   "Done %s.\n", __func__);
                *data = mcp->mb[3] << 16 | mcp->mb[2];
        }
 
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
        mbx_cmd_t *mcp = &mc;
        uint32_t iter_cnt = 0x1;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
                    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
                    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+                   "Done %s.\n", __func__);
        }
 
        /* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+                   "Done %s.\n", __func__);
        }
 
        /* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
            "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
 
        mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
        if (rval != QLA_SUCCESS)
                ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
        else
-               ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
                ql_dbg(ql_dbg_mbx, vha, 0x1101,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
 
        rval = QLA_SUCCESS;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+           "Entered %s.\n", __func__);
 
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x1104,
                    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1107,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+                   "Done %s.\n", __func__);
                if (mcp->mb[1] != 0x7)
                        ha->link_data_rate = mcp->mb[1];
        }
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
                /* Copy all bits to preserve original value */
                memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
 
-               ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+                   "Done %s.\n", __func__);
        }
        return rval;
 }
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SET_PORT_CONFIG;
        /* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x110d,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else
-               ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
        uint8_t byte;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+           "Entered %s.\n", __func__);
 
        /* Integer part */
        rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
        }
        *frac = (byte >> 6) * 25;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+           "Done %s.\n", __func__);
 fail:
        return rval;
 }
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1016,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA82XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x100c,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        int rval = QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
                    (mcp->mb[1] << 16) | mcp->mb[0],
                    (mcp->mb[3] << 16) | mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+                   "Done %s.\n", __func__);
                ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
                if (!ha->md_template_size) {
                        ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        int rval = QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+           "Entered %s.\n", __func__);
 
        ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
           ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
                    ((mcp->mb[1] << 16) | mcp->mb[0]),
                    ((mcp->mb[3] << 16) | mcp->mb[2]));
        } else
-               ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+                   "Done %s.\n", __func__);
        return rval;
 }
 
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+           "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
        mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
                ql_dbg(ql_dbg_mbx, vha, 0x1134,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+           "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
        mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
                        led_cfg[4] = mcp->mb[5];
                        led_cfg[5] = mcp->mb[6];
                }
-               ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
        if (!IS_QLA82XX(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1127,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
                "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
                ql_dbg(ql_dbg_mbx, vha, 0x1128,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1129,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
                    "Done %s.\n", __func__);
        }
 
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
        if (!IS_QLA83XX(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_WRITE_REMOTE_REG;
        mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
                ql_dbg(ql_dbg_mbx, vha, 0x1131,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1132,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
                    "Done %s.\n", __func__);
        }
 
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
        mbx_cmd_t *mcp = &mc;
 
        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-               ql_dbg(ql_dbg_mbx, vha, 0x113b,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
                    "Implicit LOGO Unsupported.\n");
                return QLA_FUNCTION_FAILED;
        }
 
 
-       ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n",  __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+           "Entering %s.\n",  __func__);
 
        /* Perform Implicit LOGO. */
        mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
                ql_dbg(ql_dbg_mbx, vha, 0x113d,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        else
-               ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
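
Note on the qla_mbx.c hunks above: the routine "Entered %s" / "Done %s" traces move from plain ql_dbg_mbx to ql_dbg_mbx + ql_dbg_verbose, while the "Failed=%x" paths keep the plain level, presumably so per-mailbox-command chatter is emitted only when verbose debugging is also selected. A minimal sketch of that additive-mask idea; the helper name and the containment test below are illustrative assumptions, not the driver's actual implementation:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: a message tagged with two OR-able level bits is printed
 * only when the enabled mask contains both of them, e.g.
 * mask_allows(ql_dbg_mbx + ql_dbg_verbose, ql_dbg_mbx) would be false. */
static bool mask_allows(uint32_t msg_level, uint32_t enabled_mask)
{
	return (msg_level & enabled_mask) == msg_level;
}
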
index aa062a1b0ca496f4a1bc4f10e85f795d0d5d17b3..3e8b32419e68959440c8f85716867e6dfc897435 100644 (file)
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include "qla_gbl.h"
+#include "qla_target.h"
 
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
+
+       qlt_update_vp_map(vha, SET_VP_IDX);
+
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
                spin_lock_irqsave(&ha->vport_slock, flags);
        }
        list_del(&vha->list);
+       qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
-                   fcport->loop_id, fcport->vp_idx);
+                   fcport->loop_id, fcport->vha->vp_idx);
 
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 
+       /* Remove port id from vp target map */
+       qlt_update_vp_map(vha, RESET_AL_PA);
+
        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-       ql_dbg(ql_dbg_dpc, vha, 0x4012,
-           "Entering %s.\n", __func__);
-       ql_dbg(ql_dbg_dpc, vha, 0x4013,
-           "vp_flags: 0x%lx.\n", vha->vp_flags);
+       ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+           "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
 
        qla2x00_do_work(vha);
 
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
                }
        }
 
-       ql_dbg(ql_dbg_dpc, vha, 0x401c,
+       ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
 }
index de722a933438dea5de3b60ce03b212b1170d734d..caf627ba7fa8b3e11a493355bf7a08f120770bcd 100644 (file)
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
        }
 
        /* Offset in flash = lower 16 bits
-        * Number of enteries = upper 16 bits
+        * Number of entries = upper 16 bits
         */
        offset = n & 0xffffU;
        n = (n >> 16) & 0xffffU;
 
-       /* number of addr/value pair should not exceed 1024 enteries */
+       /* number of addr/value pair should not exceed 1024 entries */
        if (n  >= 1024) {
                ql_log(ql_log_fatal, vha, 0x0071,
                    "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
-               ql_log(ql_log_info, NULL, 0xb054,
+               ql_log(ql_log_info, NULL, 0xb053,
                    "%s: NULL response queue pointer.\n", __func__);
                return IRQ_NONE;
        }
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
 
        if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
                ql_log(ql_log_info, vha, 0x00a1,
-                   "Firmware loaded successully from flash.\n");
+                   "Firmware loaded successfully from flash.\n");
                return QLA_SUCCESS;
        } else {
                ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
        blob = ha->hablob = qla2x00_request_firmware(vha);
        if (!blob) {
                ql_log(ql_log_fatal, vha, 0x00a3,
-                   "Firmware image not preset.\n");
+                   "Firmware image not present.\n");
                goto fw_load_failed;
        }
 
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
                if (!optrom) {
                        ql_log(ql_log_warn, vha, 0xb01b,
                            "Unable to allocate memory "
-                           "for optron burst write (%x KB).\n",
+                           "for optrom burst write (%x KB).\n",
                            OPTROM_BURST_SIZE / 1024);
                }
        }
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
                         * changing the state to DEV_READY
                         */
                        ql_log(ql_log_info, vha, 0xb023,
-                           "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
-                       ql_log(ql_log_info, vha, 0xb024,
-                           "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+                           "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+                           "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
                            drv_active, drv_state);
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                            QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
                if (ql2xmdenable) {
                        if (qla82xx_md_collect(vha))
                                ql_log(ql_log_warn, vha, 0xb02c,
-                                   "Not able to collect minidump.\n");
+                                   "Minidump not collected.\n");
                } else
                        ql_log(ql_log_warn, vha, 0xb04f,
                            "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
                                    "Firmware version differs "
                                    "Previous version: %d:%d:%d - "
                                    "New version: %d:%d:%d\n",
+                                   fw_major_version, fw_minor_version,
+                                   fw_subminor_version,
                                    ha->fw_major_version,
                                    ha->fw_minor_version,
-                                   ha->fw_subminor_version,
-                                   fw_major_version, fw_minor_version,
-                                   fw_subminor_version);
+                                   ha->fw_subminor_version);
                                /* Release MiniDump resources */
                                qla82xx_md_free(vha);
                                /* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
        return rval;
 }
 
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+       uint32_t temp, temp_state, temp_val;
+       struct qla_hw_data *ha = vha->hw;
+
+       temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+       temp_state = qla82xx_get_temp_state(temp);
+       temp_val = qla82xx_get_temp_val(temp);
+
+       if (temp_state == QLA82XX_TEMP_PANIC) {
+               ql_log(ql_log_warn, vha, 0x600e,
+                   "Device temperature %d degrees C exceeds "
+                   " maximum allowed. Hardware has been shut down.\n",
+                   temp_val);
+               return 1;
+       } else if (temp_state == QLA82XX_TEMP_WARN) {
+               ql_log(ql_log_warn, vha, 0x600f,
+                   "Device temperature %d degrees C exceeds "
+                   "operating range. Immediate action needed.\n",
+                   temp_val);
+       }
+       return 0;
+}
+
 void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
        /* don't poll if reset is going on */
        if (!ha->flags.isp82xx_reset_hdlr_active) {
                dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               if (dev_state == QLA82XX_DEV_NEED_RESET &&
+               if (qla82xx_check_temp(vha)) {
+                       set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+                       ha->flags.isp82xx_fw_hung = 1;
+                       qla82xx_clear_pending_mbx(vha);
+               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
                    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6001,
                            "Adapter reset needed.\n");
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
                } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
                        !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6002,
                            "Quiescent needed.\n");
                        set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
                                ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                        set_bit(ISP_ABORT_NEEDED,
                                            &vha->dpc_flags);
                                }
-                               qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
                                ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
                                qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
                goto md_failed;
        }
 
+       if (ha->flags.isp82xx_no_md_cap) {
+               ql_log(ql_log_warn, vha, 0xb054,
+                   "Forced reset from application, "
+                   "ignore minidump capture\n");
+               ha->flags.isp82xx_no_md_cap = 0;
+               goto md_failed;
+       }
+
        if (qla82xx_validate_template_chksum(vha)) {
                ql_log(ql_log_info, vha, 0xb039,
                    "Template checksum validation error\n");
index 4ac50e274661af0744ca1ffcbfecdcedf5771b53..6eb210e3cc637242aed65efde17902a0f94c2d60 100644 (file)
@@ -26,6 +26,7 @@
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
 #define BOOT_LOADER_DIMM_STATUS                QLA82XX_REG(0x54)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE                 QLA82XX_REG(0x1b4)
 #define QLA82XX_DMA_SHIFT_VALUE                0x55555555
 
 #define QLA82XX_HW_H0_CH_HUB_ADR    0x05
 #define QLA82XX_FW_VERSION_SUB         (QLA82XX_CAM_RAM(0x158))
 #define QLA82XX_PCIE_REG(reg)          (QLA82XX_CRB_PCIE + (reg))
 
-#define PCIE_CHICKEN3                  (0x120c8)
 #define PCIE_SETUP_FUNCTION            (0x12040)
 #define PCIE_SETUP_FUNCTION2           (0x12048)
 
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
 
+#define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_state(x)        ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+       QLA82XX_TEMP_WARN,         /* Sound alert, temperature getting high */
+       QLA82XX_TEMP_PANIC         /* Fatal error, hardware has shut down. */
+};
 #endif
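
A quick usage sketch, not part of the patch: the word read from CRB_TEMP_STATE packs the temperature reading in the high half-word and the state in the low half-word, which is what the macros above extract and qla82xx_check_temp() acts on. The raw value here is hypothetical:

/* Illustrative decode only; 0x004b0002 is a made-up register value. */
uint32_t temp       = 0x004b0002;                    /* as read from CRB_TEMP_STATE */
uint16_t temp_state = qla82xx_get_temp_state(temp);  /* 0x0002 == QLA82XX_TEMP_WARN */
uint16_t temp_val   = qla82xx_get_temp_val(temp);    /* 0x004b == 75 degrees C */
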
index c9c56a8427f3e1d36c2a1c14764a4d4860a782d9..6d1d873a20e2f8997ead5ecadd2e30a183d92c6c 100644 (file)
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
-
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "qla_target.h"
+
 /*
  * Driver version
  */
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
  */
 int ql_errlev = ql_log_all;
 
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+               "Specify if Class 2 operations are supported from the very "
+               "beginning. Default is 0 - class 2 not supported.");
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
 
        .max_sectors            = 0xFFFF,
        .shost_attrs            = qla2x00_host_attrs,
+
+       .supported_mode         = MODE_INITIATOR,
 };
 
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
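
A usage note, not part of the patch: ql2xenableclass2 is declared above with read-only permissions, so it can only be set at module load time, for example with modprobe qla2xxx ql2xenableclass2=1, and its value should then be readable under /sys/module/qla2xxx/parameters/; the default of 0 leaves Class 2 operations unsupported. The .supported_mode = MODE_INITIATOR addition and the qla_ini_mode_enabled() check around scsi_scan_host() further below go hand in hand with the qla_target.h / qlt_* target-mode infrastructure being wired in by this merge.
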
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
 
 /* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+                               struct rsp_que *rsp)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }
+       /*
+        * Make sure we record at least the request and response queue zero in
+        * case we need to free them if part of the probe fails.
+        */
+       ha->rsp_q_map[0] = rsp;
+       ha->req_q_map[0] = req;
        set_bit(0, ha->rsp_qid_map);
        set_bit(0, ha->req_qid_map);
        return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure) {
-                       ql_dbg(ql_dbg_io, vha, 0x3001,
+                       ql_dbg(ql_dbg_aer, vha, 0x9010,
                            "PCI Channel IO permanent failure, exiting "
                            "cmd=%p.\n", cmd);
                        cmd->result = DID_NO_CONNECT << 16;
                } else {
-                       ql_dbg(ql_dbg_io, vha, 0x3002,
+                       ql_dbg(ql_dbg_aer, vha, 0x9011,
                            "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
                        cmd->result = DID_REQUEUE << 16;
                }
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        rval = fc_remote_port_chkready(rport);
        if (rval) {
                cmd->result = rval;
-               ql_dbg(ql_dbg_io, vha, 0x3003,
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
                    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
                    cmd, rval);
                goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        ret = FAILED;
 
        ql_log(ql_log_info, vha, 0x8012,
-           "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+           "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
                ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
            "Memory allocated for ha=%p.\n", ha);
        ha->pdev = pdev;
+       ha->tgt.enable_class_2 = ql2xenableclass2;
 
        /* Clear our data area */
        ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
                ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
                ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
+que_init:
+       /* Alloc arrays of request and response ring ptrs */
+       if (!qla2x00_alloc_queues(ha, req, rsp)) {
+               ql_log(ql_log_fatal, base_vha, 0x003d,
+                   "Failed to allocate memory for queue pointers..."
+                   "aborting.\n");
+               goto probe_init_failed;
+       }
+
+       qlt_probe_one_stage1(base_vha, ha);
+
        /* Set up the irqs */
        ret = qla2x00_request_irqs(ha, rsp);
        if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pci_save_state(pdev);
 
-       /* Alloc arrays of request and response ring ptrs */
-que_init:
-       if (!qla2x00_alloc_queues(ha)) {
-               ql_log(ql_log_fatal, base_vha, 0x003d,
-                   "Failed to allocate memory for queue pointers.. aborting.\n");
-               goto probe_init_failed;
-       }
-
-       ha->rsp_q_map[0] = rsp;
-       ha->req_q_map[0] = req;
+       /* Assign back pointers */
        rsp->req = req;
        req->rsp = rsp;
-       set_bit(0, ha->req_qid_map);
-       set_bit(0, ha->rsp_qid_map);
+
        /* FWI2-capable only. */
        req->req_q_in = &ha->iobase->isp24.req_q_in;
        req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
        ql_dbg(ql_dbg_init, base_vha, 0x00ee,
            "DPC thread started successfully.\n");
 
+       /*
+        * If we're not coming up in initiator mode, we might sit for
+        * a while without waking up the dpc thread, which leads to a
+        * stuck process warning.  So just kick the dpc once here and
+        * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+        */
+       qla2xxx_wake_dpc(base_vha);
+
 skip_dpc:
        list_add_tail(&base_vha->list, &ha->vp_list);
        base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
        ql_dbg(ql_dbg_init, base_vha, 0x00f2,
            "Init done and hba is online.\n");
 
-       scsi_scan_host(host);
+       if (qla_ini_mode_enabled(base_vha))
+               scsi_scan_host(host);
+       else
+               ql_dbg(ql_dbg_init, base_vha, 0x0122,
+                       "skipping scsi_scan_host() for non-initiator port\n");
 
        qla2x00_alloc_sysfs_attr(base_vha);
 
@@ -2577,11 +2609,17 @@ skip_dpc:
            base_vha->host_no,
            ha->isp_ops->fw_version_str(base_vha, fw_str));
 
+       qlt_add_target(ha, base_vha);
+
        return 0;
 
 probe_init_failed:
        qla2x00_free_req_que(ha, req);
+       ha->req_q_map[0] = NULL;
+       clear_bit(0, ha->req_qid_map);
        qla2x00_free_rsp_que(ha, rsp);
+       ha->rsp_q_map[0] = NULL;
+       clear_bit(0, ha->rsp_qid_map);
        ha->max_req_queues = ha->max_rsp_queues = 0;
 
 probe_failed:
@@ -2620,6 +2658,22 @@ probe_out:
        return ret;
 }
 
+static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct task_struct *t = ha->dpc_thread;
+
+       if (ha->dpc_thread == NULL)
+               return;
+       /*
+        * qla2xxx_wake_dpc checks for ->dpc_thread
+        * so we need to zero it out.
+        */
+       ha->dpc_thread = NULL;
+       kthread_stop(t);
+}
+
 static void
 qla2x00_shutdown(struct pci_dev *pdev)
 {
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
        struct qla_hw_data  *ha;
        unsigned long flags;
 
+       /*
+        * If the PCI device is disabled, that means that probe failed and any
+        * resources should have been cleaned up on probe exit.
+        */
+       if (!atomic_read(&pdev->enable_cnt))
+               return;
+
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
 
+       ha->flags.host_shutting_down = 1;
+
        mutex_lock(&ha->vport_lock);
        while (ha->cur_vport_count) {
                struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
                ha->dpc_thread = NULL;
                kthread_stop(t);
        }
+       qlt_remove_target(ha, base_vha);
 
        qla2x00_free_sysfs_attr(base_vha);
 
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
        if (vha->timer_active)
                qla2x00_stop_timer(vha);
 
-       /* Kill the kernel thread for this host */
-       if (ha->dpc_thread) {
-               struct task_struct *t = ha->dpc_thread;
-
-               /*
-                * qla2xxx_wake_dpc checks for ->dpc_thread
-                * so we need to zero it out.
-                */
-               ha->dpc_thread = NULL;
-               kthread_stop(t);
-       }
+       qla2x00_stop_dpc_thread(vha);
 
        qla25xx_delete_queues(vha);
 
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
                spin_unlock_irqrestore(vha->host->host_lock, flags);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
-       } else
+       } else {
                fc_remote_port_delete(rport);
+               qlt_fc_port_deleted(vha, fcport);
+       }
 }
 
 /*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
     int do_login, int defer)
 {
        if (atomic_read(&fcport->state) == FCS_ONLINE &&
-           vha->vp_idx == fcport->vp_idx) {
+           vha->vp_idx == fcport->vha->vp_idx) {
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
                qla2x00_schedule_rport_del(vha, fcport, defer);
        }
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
        fc_port_t *fcport;
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
-               if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+               if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
                        continue;
 
                /*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
                        qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
                        if (defer)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
-                       else if (vha->vp_idx == fcport->vp_idx)
+                       else if (vha->vp_idx == fcport->vha->vp_idx)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
                }
        }
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        if (!ha->init_cb)
                goto fail;
 
+       if (qlt_mem_alloc(ha) < 0)
+               goto fail_free_init_cb;
+
        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
                qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
        if (!ha->gid_list)
-               goto fail_free_init_cb;
+               goto fail_free_tgt_mem;
 
        ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
        if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
        ha->gid_list_dma);
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+       qlt_mem_free(ha);
 fail_free_init_cb:
        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
        ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        if (ha->ctx_mempool)
                mempool_destroy(ha->ctx_mempool);
 
+       qlt_mem_free(ha);
+
        if (ha->init_cb)
                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
                        ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
+
+       ha->tgt.atio_ring = NULL;
+       ha->tgt.atio_dma = 0;
+       ha->tgt.tgt_vp_map = NULL;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
 
                ha->dpc_active = 1;
 
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-                   "DPC handler waking up.\n");
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-                   "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+               ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+                   "DPC handler waking up, dpc_flags=0x%lx.\n",
+                   base_vha->dpc_flags);
 
                qla2x00_do_work(base_vha);
 
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
                        clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                }
 
+               if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+                       int ret;
+                       ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+                       if (ret != QLA_SUCCESS)
+                               ql_log(ql_log_warn, base_vha, 0x121,
+                                   "Failed to enable receiving of RSCN "
+                                   "requests: 0x%x.\n", ret);
+                       clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+               }
+
                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                            "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
                return -ENOMEM;
        }
 
+       /* Initialize target kmem_cache and mem_pools */
+       ret = qlt_init();
+       if (ret < 0) {
+               kmem_cache_destroy(srb_cachep);
+               return ret;
+       } else if (ret > 0) {
+               /*
+                * If initiator mode is explicitly disabled by qlt_init(),
+                * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+                * performing scsi_scan_target() during a LOOP UP event.
+                */
+               qla2xxx_transport_functions.disable_target_scan = 1;
+               qla2xxx_transport_vport_functions.disable_target_scan = 1;
+       }
+
        /* Derive version string. */
        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
        if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
                kmem_cache_destroy(srb_cachep);
                ql_log(ql_log_fatal, NULL, 0x0002,
                    "fc_attach_transport failed...Failing load!.\n");
+               qlt_exit();
                return -ENODEV;
        }
 
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
            fc_attach_transport(&qla2xxx_transport_vport_functions);
        if (!qla2xxx_transport_vport_template) {
                kmem_cache_destroy(srb_cachep);
+               qlt_exit();
                fc_release_transport(qla2xxx_transport_template);
                ql_log(ql_log_fatal, NULL, 0x0004,
                    "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
        ret = pci_register_driver(&qla2xxx_pci_driver);
        if (ret) {
                kmem_cache_destroy(srb_cachep);
+               qlt_exit();
                fc_release_transport(qla2xxx_transport_template);
                fc_release_transport(qla2xxx_transport_vport_template);
                ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
        pci_unregister_driver(&qla2xxx_pci_driver);
        qla2x00_release_firmware();
        kmem_cache_destroy(srb_cachep);
+       qlt_exit();
        if (ctx_cachep)
                kmem_cache_destroy(ctx_cachep);
        fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644 (file)
index 0000000..77759c7
--- /dev/null
@@ -0,0 +1,4973 @@
+/*
+ *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ *  based on qla2x00t.c code:
+ *
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation, version 2
+ *  of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+       "Determines when initiator mode will be enabled. Possible values: "
+       "\"exclusive\" - initiator mode will be enabled on load, "
+       "disabled when target mode is enabled, and re-enabled when "
+       "target mode is disabled again; "
+       "\"disabled\" - initiator mode will never be enabled; "
+       "\"enabled\" (default) - initiator mode will always stay enabled.");
+
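+/*
+ * A minimal usage sketch, assuming the option is passed to the qla2xxx
+ * module at load time (e.g. on the modprobe command line or via a
+ * modprobe.d options entry):
+ *
+ *     modprobe qla2xxx qlini_mode="disabled"
+ */
+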
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+       FCP_TMF_CMPL = 0,
+       FCP_DATA_LEN_INVALID = 1,
+       FCP_CMND_FIELDS_INVALID = 2,
+       FCP_DATA_PARAM_MISMATCH = 3,
+       FCP_TMF_REJECTED = 4,
+       FCP_TMF_FAILED = 5,
+       FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE      0   /* simple task attribute */
+#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCP_PTA_ORDERED     2   /* ordered task attribute */
+#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
+#define FCP_PTA_MASK        7   /* mask for task attribute field */
+#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under HW lock and could unlock/lock it inside.
+ * It isn't an issue, since in the current implementation, at the time
+ * those functions are called:
+ *
+ *   - Either the context is IRQ and only the IRQ handler can modify HW data,
+ *     including ring-related fields,
+ *
+ *   - Or access to target mode variables from struct qla_tgt doesn't
+ *     cross those functions' boundaries, except for tgt_stop, which is
+ *     additionally protected by irq_cmd_count.
+ */
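+/*
+ * A minimal sketch of the calling pattern described above (illustrative
+ * only, assuming the usual ha->hardware_lock/flags locals):
+ *
+ *     spin_lock_irqsave(&ha->hardware_lock, flags);
+ *     pkt = qla2x00_alloc_iocbs(vha, NULL);    (may drop and re-take the lock)
+ *     ... build and post the IOCB ...
+ *     spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ */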
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+       struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+       int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+       *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+       struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+       struct qla_tgt *tgt,
+       const uint8_t *port_name)
+{
+       struct qla_tgt_sess *sess;
+
+       list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+               if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+                       return sess;
+       }
+
+       return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+       /* Send marker if required */
+       if (unlikely(vha->marker_needed != 0)) {
+               int rc = qla2x00_issue_marker(vha, vha_locked);
+               if (rc != QLA_SUCCESS) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+                           "qla_target(%d): issue_marker() failed\n",
+                           vha->vp_idx);
+               }
+               return rc;
+       }
+       return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+       uint8_t *d_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint8_t vp_idx;
+
+       if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+               return NULL;
+
+       if (vha->d_id.b.al_pa == d_id[2])
+               return vha;
+
+       BUG_ON(ha->tgt.tgt_vp_map == NULL);
+       vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+               return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+       return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+       uint16_t vp_idx)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (vha->vp_idx == vp_idx)
+               return vha;
+
+       BUG_ON(ha->tgt.tgt_vp_map == NULL);
+       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+               return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+       return NULL;
+}
+
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       switch (atio->u.raw.entry_type) {
+       case ATIO_TYPE7:
+       {
+               struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+                   atio->u.isp24.fcp_hdr.d_id);
+               if (unlikely(NULL == host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+                           "qla_target(%d): Received ATIO_TYPE7 "
+                           "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+                           atio->u.isp24.fcp_hdr.d_id[0],
+                           atio->u.isp24.fcp_hdr.d_id[1],
+                           atio->u.isp24.fcp_hdr.d_id[2]);
+                       break;
+               }
+               qlt_24xx_atio_pkt(host, atio);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct imm_ntfy_from_isp *entry =
+                   (struct imm_ntfy_from_isp *)atio;
+
+               if ((entry->u.isp24.vp_index != 0xFF) &&
+                   (entry->u.isp24.nport_handle != 0xFFFF)) {
+                       host = qlt_find_host_by_vp_idx(vha,
+                           entry->u.isp24.vp_index);
+                       if (unlikely(!host)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+                                   "qla_target(%d): Received "
+                                   "ATIO (IMMED_NOTIFY_TYPE) "
+                                   "with unknown vp_index %d\n",
+                                   vha->vp_idx, entry->u.isp24.vp_index);
+                               break;
+                       }
+               }
+               qlt_24xx_atio_pkt(host, atio);
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe040,
+                   "qla_target(%d): Received ATIO packet of unknown "
+                   "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+               break;
+       }
+
+       return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+       switch (pkt->entry_type) {
+       case CTIO_TYPE7:
+       {
+               struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe041,
+                           "qla_target(%d): Response pkt (CTIO_TYPE7) "
+                           "received, with unknown vp_index %d\n",
+                           vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct imm_ntfy_from_isp *entry =
+                   (struct imm_ntfy_from_isp *)pkt;
+
+               host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe042,
+                           "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+                           "received, with unknown vp_index %d\n",
+                           vha->vp_idx, entry->u.isp24.vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case NOTIFY_ACK_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+               if (0xFF != entry->u.isp24.vp_index) {
+                       host = qlt_find_host_by_vp_idx(vha,
+                           entry->u.isp24.vp_index);
+                       if (unlikely(!host)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe043,
+                                   "qla_target(%d): Response "
+                                   "pkt (NOTIFY_ACK_TYPE) "
+                                   "received, with unknown "
+                                   "vp_index %d\n", vha->vp_idx,
+                                   entry->u.isp24.vp_index);
+                               break;
+                       }
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case ABTS_RECV_24XX:
+       {
+               struct abts_recv_from_24xx *entry =
+                   (struct abts_recv_from_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe044,
+                           "qla_target(%d): Response pkt "
+                           "(ABTS_RECV_24XX) received, with unknown "
+                           "vp_index %d\n", vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case ABTS_RESP_24XX:
+       {
+               struct abts_resp_to_24xx *entry =
+                   (struct abts_resp_to_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe045,
+                           "qla_target(%d): Response pkt "
+                           "(ABTS_RESP_24XX) received, with unknown "
+                           "vp_index %d\n", vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       default:
+               qlt_response_pkt(vha, pkt);
+               break;
+       }
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+       struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+           free_work);
+       struct qla_tgt *tgt = sess->tgt;
+       struct scsi_qla_host *vha = sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+
+       BUG_ON(!tgt);
+       /*
+        * Release the target session for FC Nexus from fabric module code.
+        */
+       if (sess->se_sess != NULL)
+               ha->tgt.tgt_ops->free_session(sess);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+           "Unregistration of sess %p finished\n", sess);
+
+       kfree(sess);
+       /*
+        * We need to protect against a race where tgt is freed before or
+        * inside wake_up().
+        */
+       tgt->sess_count--;
+       if (tgt->sess_count == 0)
+               wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+       struct scsi_qla_host *vha = sess->vha;
+
+       vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+       list_del(&sess->sess_list_entry);
+       if (sess->deleted)
+               list_del(&sess->del_list_entry);
+
+       INIT_WORK(&sess->free_work, qlt_free_session_done);
+       schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       uint32_t unpacked_lun, lun = 0;
+       uint16_t loop_id;
+       int res = 0;
+       struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+       loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+       if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+               /* Global event */
+               atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+               qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+               if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+                       sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+                           typeof(*sess), sess_list_entry);
+                       switch (mcmd) {
+                       case QLA_TGT_NEXUS_LOSS_SESS:
+                               mcmd = QLA_TGT_NEXUS_LOSS;
+                               break;
+                       case QLA_TGT_ABORT_ALL_SESS:
+                               mcmd = QLA_TGT_ABORT_ALL;
+                               break;
+                       case QLA_TGT_NEXUS_LOSS:
+                       case QLA_TGT_ABORT_ALL:
+                               break;
+                       default:
+                               ql_dbg(ql_dbg_tgt, vha, 0xe046,
+                                   "qla_target(%d): Not allowed "
+                                   "command %x in %s", vha->vp_idx,
+                                   mcmd, __func__);
+                               sess = NULL;
+                               break;
+                       }
+               } else
+                       sess = NULL;
+#endif
+       } else {
+               sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe000,
+           "Using sess for qla_tgt_reset: %p\n", sess);
+       if (!sess) {
+               res = -ESRCH;
+               return res;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe047,
+           "scsi(%ld): resetting (session %p from port "
+           "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+           "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+           sess->port_name[0], sess->port_name[1],
+           sess->port_name[2], sess->port_name[3],
+           sess->port_name[4], sess->port_name[5],
+           sess->port_name[6], sess->port_name[7],
+           mcmd, loop_id);
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+           iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+       bool immediate)
+{
+       struct qla_tgt *tgt = sess->tgt;
+       uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+       if (sess->deleted)
+               return;
+
+       ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+           "Scheduling sess %p for deletion\n", sess);
+       list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+       sess->deleted = 1;
+
+       if (immediate)
+               dev_loss_tmo = 0;
+
+       sess->expires = jiffies + dev_loss_tmo * HZ;
+
+       ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+           "qla_target(%d): session for port %02x:%02x:%02x:"
+           "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+           "deletion in %u secs (expires: %lu) immed: %d\n",
+           sess->vha->vp_idx,
+           sess->port_name[0], sess->port_name[1],
+           sess->port_name[2], sess->port_name[3],
+           sess->port_name[4], sess->port_name[5],
+           sess->port_name[6], sess->port_name[7],
+           sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+       if (immediate)
+               schedule_delayed_work(&tgt->sess_del_work, 0);
+       else
+               schedule_delayed_work(&tgt->sess_del_work,
+                   sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+       struct qla_tgt_sess *sess;
+
+       list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+               qlt_schedule_sess_for_deletion(sess, true);
+
+       /* At this point tgt could already be dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+       uint16_t *loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       dma_addr_t gid_list_dma;
+       struct gid_list_info *gid_list;
+       char *id_iter;
+       int res, rc, i;
+       uint16_t entries;
+
+       gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+           &gid_list_dma, GFP_KERNEL);
+       if (!gid_list) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+                   "qla_target(%d): DMA allocation of %u bytes failed\n",
+                   vha->vp_idx, qla2x00_gid_list_size(ha));
+               return -ENOMEM;
+       }
+
+       /* Get list of logged in devices */
+       rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+                   "qla_target(%d): get_id_list() failed: %x\n",
+                   vha->vp_idx, rc);
+               res = -1;
+               goto out_free_id_list;
+       }
+
+       id_iter = (char *)gid_list;
+       res = -1;
+       for (i = 0; i < entries; i++) {
+               struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+               if ((gid->al_pa == s_id[2]) &&
+                   (gid->area == s_id[1]) &&
+                   (gid->domain == s_id[0])) {
+                       *loop_id = le16_to_cpu(gid->loop_id);
+                       res = 0;
+                       break;
+               }
+               id_iter += ha->gid_list_info_size;
+       }
+
+out_free_id_list:
+       dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+           gid_list, gid_list_dma);
+       return res;
+}
+
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+       struct qla_tgt_sess *sess)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_port_24xx_data *pmap24;
+       bool res, found = false;
+       int rc, i;
+       uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+       uint16_t entries;
+       void *pmap;
+       int pmap_len;
+       fc_port_t *fcport;
+       int global_resets;
+
+retry:
+       global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+       rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+       if (rc != QLA_SUCCESS) {
+               res = false;
+               goto out;
+       }
+
+       pmap24 = pmap;
+       entries = pmap_len/sizeof(*pmap24);
+
+       for (i = 0; i < entries; ++i) {
+               if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+                       loop_id = le16_to_cpu(pmap24[i].loop_id);
+                       found = true;
+                       break;
+               }
+       }
+
+       kfree(pmap);
+
+       if (!found) {
+               res = false;
+               goto out;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+           "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+       fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+       if (fcport == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+                   "qla_target(%d): Allocation of tmp FC port failed",
+                   vha->vp_idx);
+               res = false;
+               goto out;
+       }
+
+       fcport->loop_id = loop_id;
+
+       rc = qla2x00_get_port_database(vha, fcport, 0);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+                   "qla_target(%d): Failed to retrieve fcport "
+                   "information -- get_port_database() returned %x "
+                   "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+               res = false;
+               goto out_free_fcport;
+       }
+
+       if (global_resets !=
+           atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+                   "qla_target(%d): global reset during session discovery"
+                   " (counter was %d, new %d), retrying",
+                   vha->vp_idx, global_resets,
+                   atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+               goto retry;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+           "Updating sess %p (s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+           "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
+           sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+           fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+       sess->s_id = fcport->d_id;
+       sess->loop_id = fcport->loop_id;
+       sess->conf_compl_supported = !!(fcport->flags &
+           FCF_CONF_COMP_SUPPORTED);
+
+       res = true;
+
+out_free_fcport:
+       kfree(fcport);
+
+out:
+       return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+       BUG_ON(!sess->deleted);
+
+       list_del(&sess->del_list_entry);
+       sess->deleted = 0;
+}
+
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+           sess_del_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       while (!list_empty(&tgt->del_sess_list)) {
+               sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+                   del_list_entry);
+               if (time_after_eq(jiffies, sess->expires)) {
+                       bool cancel;
+
+                       qlt_undelete_sess(sess);
+
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       cancel = qlt_check_fcport_exist(vha, sess);
+
+                       if (cancel) {
+                               if (sess->deleted) {
+                                       /*
+                                        * sess was again deleted while we were
+                                        * discovering it
+                                        */
+                                       spin_lock_irqsave(&ha->hardware_lock,
+                                           flags);
+                                       continue;
+                               }
+
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+                                   "qla_target(%d): cancel deletion of "
+                                   "session for port %02x:%02x:%02x:%02x:%02x:"
+                                   "%02x:%02x:%02x (loop ID %d), because "
+                                   " it isn't deleted by firmware",
+                                   vha->vp_idx, sess->port_name[0],
+                                   sess->port_name[1], sess->port_name[2],
+                                   sess->port_name[3], sess->port_name[4],
+                                   sess->port_name[5], sess->port_name[6],
+                                   sess->port_name[7], sess->loop_id);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+                                   "Timeout: sess %p about to be deleted\n",
+                                   sess);
+                               ha->tgt.tgt_ops->shutdown_sess(sess);
+                               ha->tgt.tgt_ops->put_sess(sess);
+                       }
+
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+               } else {
+                       schedule_delayed_work(&tgt->sess_del_work,
+                           sess->expires - jiffies);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+       struct scsi_qla_host *vha,
+       fc_port_t *fcport,
+       bool local)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+       unsigned char be_sid[3];
+
+       /* Check to avoid double sessions */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+                               sess_list_entry) {
+               if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+                           "Double sess %p found (s_id %x:%x:%x, "
+                           "loop_id %d), updating to d_id %x:%x:%x, "
+                           "loop_id %d", sess, sess->s_id.b.domain,
+                           sess->s_id.b.al_pa, sess->s_id.b.area,
+                           sess->loop_id, fcport->d_id.b.domain,
+                           fcport->d_id.b.al_pa, fcport->d_id.b.area,
+                           fcport->loop_id);
+
+                       if (sess->deleted)
+                               qlt_undelete_sess(sess);
+
+                       kref_get(&sess->se_sess->sess_kref);
+                       sess->s_id = fcport->d_id;
+                       sess->loop_id = fcport->loop_id;
+                       sess->conf_compl_supported = !!(fcport->flags &
+                           FCF_CONF_COMP_SUPPORTED);
+                       if (sess->local && !local)
+                               sess->local = 0;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+                       return sess;
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+       if (!sess) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+                   "qla_target(%u): session allocation failed, "
+                   "all commands from port %02x:%02x:%02x:%02x:"
+                   "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+                   fcport->port_name[0], fcport->port_name[1],
+                   fcport->port_name[2], fcport->port_name[3],
+                   fcport->port_name[4], fcport->port_name[5],
+                   fcport->port_name[6], fcport->port_name[7]);
+
+               return NULL;
+       }
+       sess->tgt = ha->tgt.qla_tgt;
+       sess->vha = vha;
+       sess->s_id = fcport->d_id;
+       sess->loop_id = fcport->loop_id;
+       sess->local = local;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+           "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+           sess, ha->tgt.qla_tgt);
+
+       be_sid[0] = sess->s_id.b.domain;
+       be_sid[1] = sess->s_id.b.area;
+       be_sid[2] = sess->s_id.b.al_pa;
+       /*
+        * Determine if this fc_port->port_name is allowed to access
+        * target mode using explicit NodeACLs+MappedLUNs, or using
+        * TPG demo mode.  If this is successful, a target mode FC nexus
+        * is created.
+        */
+       if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+           &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+               kfree(sess);
+               return NULL;
+       }
+       /*
+        * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+        * access across ->hardware_lock reacquire.
+        */
+       kref_get(&sess->se_sess->sess_kref);
+
+       sess->conf_compl_supported = !!(fcport->flags &
+           FCF_CONF_COMP_SUPPORTED);
+       BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+       memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+       ha->tgt.qla_tgt->sess_count++;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+           "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+           "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+           " completion %ssupported) added\n",
+           vha->vp_idx, local ?  "local " : "", fcport->port_name[0],
+           fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+           fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+           fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+           sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+           "" : "not ");
+
+       return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+
+       if (!vha->hw->tgt.tgt_ops)
+               return;
+
+       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+               return;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (tgt->tgt_stop) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+       sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_create_sess(vha, fcport, false);
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+       } else {
+               kref_get(&sess->se_sess->sess_kref);
+
+               if (sess->deleted) {
+                       qlt_undelete_sess(sess);
+
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+                           "qla_target(%u): %ssession for port %02x:"
+                           "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+                           "reappeared\n", vha->vp_idx, sess->local ? "local "
+                           : "", sess->port_name[0], sess->port_name[1],
+                           sess->port_name[2], sess->port_name[3],
+                           sess->port_name[4], sess->port_name[5],
+                           sess->port_name[6], sess->port_name[7],
+                           sess->loop_id);
+
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+                           "Reappeared sess %p\n", sess);
+               }
+               sess->s_id = fcport->d_id;
+               sess->loop_id = fcport->loop_id;
+               sess->conf_compl_supported = !!(fcport->flags &
+                   FCF_CONF_COMP_SUPPORTED);
+       }
+
+       if (sess && sess->local) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+                   "qla_target(%u): local session for "
+                   "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                   "(loop ID %d) became global\n", vha->vp_idx,
+                   fcport->port_name[0], fcport->port_name[1],
+                   fcport->port_name[2], fcport->port_name[3],
+                   fcport->port_name[4], fcport->port_name[5],
+                   fcport->port_name[6], fcport->port_name[7],
+                   sess->loop_id);
+               sess->local = 0;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+
+       if (!vha->hw->tgt.tgt_ops)
+               return;
+
+       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+               return;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (tgt->tgt_stop) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+       sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+       sess->local = 1;
+       qlt_schedule_sess_for_deletion(sess, false);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+       int res;
+       /*
+        * We need to protect against a race where tgt is freed before or
+        * inside wake_up().
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+           "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+           tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+       res = (tgt->sess_count == 0);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+
+       if (tgt->tgt_stop || tgt->tgt_stopped) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+                   "Already in tgt->tgt_stop or tgt_stopped state\n");
+               dump_stack();
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+           vha->host_no, vha);
+       /*
+        * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+        * The lock is needed because we can still get an incoming packet.
+        */
+       mutex_lock(&ha->tgt.tgt_mutex);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tgt->tgt_stop = 1;
+       qlt_clear_tgt_db(tgt, true);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       mutex_unlock(&ha->tgt.tgt_mutex);
+
+       flush_delayed_work_sync(&tgt->sess_del_work);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+           "Waiting for sess works (tgt %p)", tgt);
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       while (!list_empty(&tgt->sess_works_list)) {
+               spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+               flush_scheduled_work();
+               spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       }
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+           "Waiting for tgt %p: list_empty(sess_list)=%d "
+           "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+           tgt->sess_count);
+
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+       /* Big hammer */
+       if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+               qlt_disable_vha(vha);
+
+       /* Wait for sessions to clear out (just in case) */
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+
+       if (tgt->tgt_stopped) {
+               ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+                   "Already in tgt->tgt_stopped state\n");
+               dump_stack();
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+           "Waiting for %d IRQ commands to complete (tgt %p)",
+           tgt->irq_cmd_count, tgt);
+
+       mutex_lock(&ha->tgt.tgt_mutex);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       while (tgt->irq_cmd_count != 0) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               udelay(2);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+       }
+       tgt->tgt_stop = 0;
+       tgt->tgt_stopped = 1;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       mutex_unlock(&ha->tgt.tgt_mutex);
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+           tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+
+       if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+               qlt_stop_phase2(tgt);
+
+       ha->tgt.qla_tgt = NULL;
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+           "Release of tgt %p finished\n", tgt);
+
+       kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+       const void *param, unsigned int param_size)
+{
+       struct qla_tgt_sess_work_param *prm;
+       unsigned long flags;
+
+       prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+       if (!prm) {
+               ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+                   "qla_target(%d): Unable to create session "
+                   "work, command will be refused", 0);
+               return -ENOMEM;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+           "Scheduling work (type %d, prm %p)"
+           " to find session for param %p (size %d, tgt %p)\n",
+           type, prm, param, param_size, tgt);
+
+       prm->type = type;
+       memcpy(&prm->tm_iocb, param, param_size);
+
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+       schedule_work(&tgt->sess_work);
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy,
+       uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+       uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       struct nack_to_isp *nack;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!pkt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe049,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return;
+       }
+
+       if (ha->tgt.qla_tgt != NULL)
+               ha->tgt.qla_tgt->notify_ack_expected++;
+
+       pkt->entry_type = NOTIFY_ACK_TYPE;
+       pkt->entry_count = 1;
+
+       nack = (struct nack_to_isp *)pkt;
+       nack->ox_id = ntfy->ox_id;
+
+       nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+       if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+               nack->u.isp24.flags = ntfy->u.isp24.flags &
+                       __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+       }
+       nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+       nack->u.isp24.status = ntfy->u.isp24.status;
+       nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+       nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+       nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+       nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+       nack->u.isp24.srr_reject_code = srr_reject_code;
+       nack->u.isp24.srr_reject_code_expl = srr_explan;
+       nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe005,
+           "qla_target(%d): Sending 24xx Notify Ack %d\n",
+           vha->vp_idx, nack->u.isp24.status);
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts, uint32_t status,
+       bool ids_reversed)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct abts_resp_to_24xx *resp;
+       uint32_t f_ctl;
+       uint8_t *p;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe006,
+           "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
+           ha, abts, status);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!resp) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet", vha->vp_idx, __func__);
+               return;
+       }
+
+       resp->entry_type = ABTS_RESP_24XX;
+       resp->entry_count = 1;
+       resp->nport_handle = abts->nport_handle;
+       resp->vp_index = vha->vp_idx;
+       resp->sof_type = abts->sof_type;
+       resp->exchange_address = abts->exchange_address;
+       resp->fcp_hdr_le = abts->fcp_hdr_le;
+       f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+           F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+           F_CTL_SEQ_INITIATIVE);
+       p = (uint8_t *)&f_ctl;
+       resp->fcp_hdr_le.f_ctl[0] = *p++;
+       resp->fcp_hdr_le.f_ctl[1] = *p++;
+       resp->fcp_hdr_le.f_ctl[2] = *p;
+       if (ids_reversed) {
+               resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+               resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+               resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+               resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+               resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+               resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+       } else {
+               resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+               resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+               resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+               resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+               resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+               resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+       }
+       resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+       if (status == FCP_TMF_CMPL) {
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+               resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+               resp->payload.ba_acct.low_seq_cnt = 0x0000;
+               resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+               resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+               resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+       } else {
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+               resp->payload.ba_rjt.reason_code =
+                       BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+               /* Other bytes are zero */
+       }
+
+       ha->tgt.qla_tgt->abts_resp_expected++;
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+       struct abts_resp_from_24xx_fw *entry)
+{
+       struct ctio7_to_24xx *ctio;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe007,
+           "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (ctio == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return;
+       }
+
+       /*
+        * On entry we have the firmware's response to the ABTS response
+        * we generated earlier, so its ID fields are already reversed.
+        */
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->nport_handle = entry->nport_handle;
+       ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = vha->vp_idx;
+       ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+       ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+       ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+       ctio->exchange_addr = entry->exchange_addr_to_abort;
+       ctio->u.status1.flags =
+           __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+               CTIO7_FLAGS_TERMINATE);
+       ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+       qla2x00_start_iocbs(vha, vha->req);
+
+       qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+           FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       int rc;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+           "qla_target(%d): task abort (tag=%d)\n",
+           vha->vp_idx, abts->exchange_addr_to_abort);
+
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (mcmd == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+                   "qla_target(%d): %s: Allocation of ABORT cmd failed",
+                   vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+
+       mcmd->sess = sess;
+       memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+       rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+           abts->exchange_addr_to_abort);
+       if (rc != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+                   "qla_target(%d):  tgt_ops->handle_tmr()"
+                   " failed: %d", vha->vp_idx, rc);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       uint32_t tag = abts->exchange_addr_to_abort;
+       uint8_t s_id[3];
+       int rc;
+
+       if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+                   "qla_target(%d): ABTS: Abort Sequence not "
+                   "supported\n", vha->vp_idx);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
+       if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+                   "qla_target(%d): ABTS: Unknown Exchange "
+                   "Address received\n", vha->vp_idx);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+           "qla_target(%d): task abort (s_id=%x:%x:%x, "
+           "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+           abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+           le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+       s_id[0] = abts->fcp_hdr_le.s_id[2];
+       s_id[1] = abts->fcp_hdr_le.s_id[1];
+       s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+                   "qla_target(%d): task abort for non-existent session\n",
+                   vha->vp_idx);
+               rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+                   QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+               if (rc != 0) {
+                       qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+                           false);
+               }
+               return;
+       }
+
+       rc = __qlt_24xx_handle_abts(vha, abts, sess);
+       if (rc != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+                   "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+                   vha->vp_idx, rc);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+       struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+       struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+       struct ctio7_to_24xx *ctio;
+
+       ql_dbg(ql_dbg_tgt, ha, 0xe008,
+           "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
+           ha, atio, resp_code);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+               return;
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+       if (ctio == NULL) {
+               ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", ha->vp_idx, __func__);
+               return;
+       }
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       ctio->nport_handle = mcmd->sess->loop_id;
+       ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = ha->vp_idx;
+       ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+           __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+               CTIO7_FLAGS_SEND_STATUS);
+       ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.scsi_status =
+           __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+       ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+       ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+       qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       struct scsi_qla_host *vha = mcmd->sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+           "TM response mcmd (%p) status %#x state %#x",
+           mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+               qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+                   0, 0, 0, 0, 0, 0);
+       else {
+               if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+                       qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+                           mcmd->fc_tm_rsp, false);
+               else
+                       qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+                           mcmd->fc_tm_rsp);
+       }
+       /*
+        * Make the callback for ->free_mcmd() to queue_work() and invoke
+        * target_put_sess_cmd() to drop cmd_kref to 1.  The final
+        * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+        * -> tcm_qla2xxx_check_stop_free() to release the TMR-associated se_cmd
+        * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+        * qlt_xmit_tm_rsp() returns here.
+        */
+       ha->tgt.tgt_ops->free_mcmd(mcmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+       struct qla_tgt_cmd *cmd = prm->cmd;
+
+       BUG_ON(cmd->sg_cnt == 0);
+
+       prm->sg = (struct scatterlist *)cmd->sg;
+       prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+           cmd->sg_cnt, cmd->dma_data_direction);
+       if (unlikely(prm->seg_cnt == 0))
+               goto out_err;
+
+       prm->cmd->sg_mapped = 1;
+
+       /*
+        * If there are more SG entries than fit in the command IOCB,
+        * continuation entries must be allocated as well.
+        */
+       if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+               prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+                   prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+       ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+           prm->seg_cnt, prm->req_cnt);
+       return 0;
+
+out_err:
+       ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+           "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+           0, prm->cmd->sg_cnt);
+       return -1;
+}
+
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       BUG_ON(!cmd->sg_mapped);
+       pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+       cmd->sg_mapped = 0;
+}
+
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+       uint32_t req_cnt)
+{
+       struct qla_hw_data *ha = vha->hw;
+       device_reg_t __iomem *reg = ha->iobase;
+       uint32_t cnt;
+
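+       /*
+        * If the cached free-entry count looks too small, re-read the
+        * firmware's request queue out pointer and recompute how many
+        * request entries are actually free.
+        */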
+       if (vha->req->cnt < (req_cnt + 2)) {
+               cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+                   "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
+                   "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+                   vha->req->ring_index, vha->req->cnt, req_cnt);
+               if (vha->req->ring_index < cnt)
+                       vha->req->cnt = cnt - vha->req->ring_index;
+               else
+                       vha->req->cnt = vha->req->length -
+                           (vha->req->ring_index - cnt);
+       }
+
+       if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+                   "qla_target(%d): There is no room in the "
+                   "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+                   "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+                   vha->req->cnt, req_cnt);
+               return -EAGAIN;
+       }
+       vha->req->cnt -= req_cnt;
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+       /* Adjust ring index. */
+       vha->req->ring_index++;
+       if (vha->req->ring_index == vha->req->length) {
+               vha->req->ring_index = 0;
+               vha->req->ring_ptr = vha->req->ring;
+       } else {
+               vha->req->ring_ptr++;
+       }
+       return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t h;
+
+       h = ha->tgt.current_handle;
+       /* always increment cmd handle */
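+       /*
+        * Scan ha->tgt.cmds[] for a free slot, skipping the reserved NULL
+        * and SKIP handles; wrapping all the way back to the starting
+        * handle means no free slots are left.
+        */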
+       do {
+               ++h;
+               if (h > MAX_OUTSTANDING_COMMANDS)
+                       h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+               if (h == ha->tgt.current_handle) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+                           "qla_target(%d): Ran out of "
+                           "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+                       h = QLA_TGT_NULL_HANDLE;
+                       break;
+               }
+       } while ((h == QLA_TGT_NULL_HANDLE) ||
+           (h == QLA_TGT_SKIP_HANDLE) ||
+           (ha->tgt.cmds[h-1] != NULL));
+
+       if (h != QLA_TGT_NULL_HANDLE)
+               ha->tgt.current_handle = h;
+
+       return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       uint32_t h;
+       struct ctio7_to_24xx *pkt;
+       struct qla_hw_data *ha = vha->hw;
+       struct atio_from_isp *atio = &prm->cmd->atio;
+
+       pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+       prm->pkt = pkt;
+       memset(pkt, 0, sizeof(*pkt));
+
+       pkt->entry_type = CTIO_TYPE7;
+       pkt->entry_count = (uint8_t)prm->req_cnt;
+       pkt->vp_index = vha->vp_idx;
+
+       h = qlt_make_handle(vha);
+       if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+               /*
+                * CTIO type 7 from the firmware doesn't provide a way to
+                * know the initiator's LOOP ID, hence we can't find
+                * the session and therefore the command.
+                */
+               return -EAGAIN;
+       } else
+               ha->tgt.cmds[h-1] = prm->cmd;
+
+       pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->nport_handle = prm->cmd->loop_id;
+       pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       pkt->exchange_addr = atio->u.isp24.exchange_addr;
+       pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+       pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+           "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+           le16_to_cpu(pkt->u.status0.ox_id));
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough request entries so that the lock need not be dropped.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       int cnt;
+       uint32_t *dword_ptr;
+       int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+       /* Build continuation packets */
+       while (prm->seg_cnt > 0) {
+               cont_a64_entry_t *cont_pkt64 =
+                       (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+               /*
+                * Make sure that none of cont_pkt64's 64-bit-specific
+                * fields are used for 32-bit addressing; cast to
+                * (cont_entry_t *) for that.
+                */
+
+               memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+               cont_pkt64->entry_count = 1;
+               cont_pkt64->sys_define = 0;
+
+               if (enable_64bit_addressing) {
+                       cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+                       dword_ptr =
+                           (uint32_t *)&cont_pkt64->dseg_0_address;
+               } else {
+                       cont_pkt64->entry_type = CONTINUE_TYPE;
+                       dword_ptr =
+                           (uint32_t *)&((cont_entry_t *)
+                               cont_pkt64)->dseg_0_address;
+               }
+
+               /* Load continuation entry data segments */
+               for (cnt = 0;
+                   cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+                   cnt++, prm->seg_cnt--) {
+                       *dword_ptr++ =
+                           cpu_to_le32(pci_dma_lo32
+                               (sg_dma_address(prm->sg)));
+                       if (enable_64bit_addressing) {
+                               *dword_ptr++ =
+                                   cpu_to_le32(pci_dma_hi32
+                                       (sg_dma_address
+                                       (prm->sg)));
+                       }
+                       *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+                       ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+                           "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+                           (long long unsigned int)
+                           pci_dma_hi32(sg_dma_address(prm->sg)),
+                           (long long unsigned int)
+                           pci_dma_lo32(sg_dma_address(prm->sg)),
+                           (int)sg_dma_len(prm->sg));
+
+                       prm->sg = sg_next(prm->sg);
+               }
+       }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough request entries so that the lock need not be dropped.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       int cnt;
+       uint32_t *dword_ptr;
+       int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+       struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+           "iocb->scsi_status=%x, iocb->flags=%x\n",
+           le16_to_cpu(pkt24->u.status0.scsi_status),
+           le16_to_cpu(pkt24->u.status0.flags));
+
+       pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+       /* Setup packet address segment pointer */
+       dword_ptr = pkt24->u.status0.dseg_0_address;
+
+       /* Set total data segment count */
+       if (prm->seg_cnt)
+               pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+       if (prm->seg_cnt == 0) {
+               /* No data transfer */
+               *dword_ptr++ = 0;
+               *dword_ptr = 0;
+               return;
+       }
+
+       /* If scatter gather */
+       ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+       /* Load command entry data segments */
+       for (cnt = 0;
+           (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+           cnt++, prm->seg_cnt--) {
+               *dword_ptr++ =
+                   cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+               if (enable_64bit_addressing) {
+                       *dword_ptr++ =
+                           cpu_to_le32(pci_dma_hi32(
+                               sg_dma_address(prm->sg)));
+               }
+               *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe010,
+                   "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+                   (long long unsigned int)pci_dma_hi32(sg_dma_address(
+                   prm->sg)),
+                   (long long unsigned int)pci_dma_lo32(sg_dma_address(
+                   prm->sg)),
+                   (int)sg_dma_len(prm->sg));
+
+               prm->sg = sg_next(prm->sg);
+       }
+
+       qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+       return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+       struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+       uint32_t *full_req_cnt)
+{
+       struct qla_tgt *tgt = cmd->tgt;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       if (unlikely(cmd->aborted)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+                   "qla_target(%d): terminating exchange "
+                   "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+                   se_cmd, cmd->tag);
+
+               cmd->state = QLA_TGT_STATE_ABORTED;
+
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+               /* !! At this point cmd could be already freed !! */
+               return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+           vha->vp_idx, cmd->tag);
+
+       prm->cmd = cmd;
+       prm->tgt = tgt;
+       prm->rq_result = scsi_status;
+       prm->sense_buffer = &cmd->sense_buffer[0];
+       prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+       prm->sg = NULL;
+       prm->seg_cnt = -1;
+       prm->req_cnt = 1;
+       prm->add_status_pkt = 0;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+           prm->rq_result, xmit_type);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+               return -EFAULT;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+       if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+               if (qlt_pci_map_calc_cnt(prm) != 0)
+                       return -EAGAIN;
+       }
+
+       *full_req_cnt = prm->req_cnt;
+
+       if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+               prm->residual = se_cmd->residual_count;
+               ql_dbg(ql_dbg_tgt, vha, 0xe014,
+                   "Residual underflow: %d (tag %d, "
+                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                   cmd->bufflen, prm->rq_result);
+               prm->rq_result |= SS_RESIDUAL_UNDER;
+       } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+               prm->residual = se_cmd->residual_count;
+               ql_dbg(ql_dbg_tgt, vha, 0xe015,
+                   "Residual overflow: %d (tag %d, "
+                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                   cmd->bufflen, prm->rq_result);
+               prm->rq_result |= SS_RESIDUAL_OVER;
+       }
+
+       if (xmit_type & QLA_TGT_XMIT_STATUS) {
+               /*
+                * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+                * ignored in *xmit_response() below
+                */
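+               /*
+                * When data is being sent along with status and there is
+                * valid sense data, or (on FWI2-capable HBAs) a non-zero
+                * SCSI status, a separate status CTIO is queued, so one
+                * extra request entry is reserved for it here.
+                */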
+               if (qlt_has_data(cmd)) {
+                       if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+                           (IS_FWI2_CAPABLE(ha) &&
+                           (prm->rq_result != 0))) {
+                               prm->add_status_pkt = 1;
+                               (*full_req_cnt)++;
+                       }
+               }
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe016,
+           "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+           prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+       return 0;
+}
+
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+       struct qla_tgt_cmd *cmd, int sending_sense)
+{
+       if (ha->tgt.enable_class_2)
+               return 0;
+
+       if (sending_sense)
+               return cmd->conf_compl_supported;
+       else
+               return ha->tgt.enable_explicit_conf &&
+                   cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ *  Originally taken from the XFS code
+ */
+static unsigned long qlt_srr_random(void)
+{
+       static int Inited;
+       static unsigned long RandomValue;
+       static DEFINE_SPINLOCK(lock);
+       /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
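+       /*
+        * Park-Miller "minimal standard" generator, computed with Schrage's
+        * method to avoid 32-bit overflow.
+        */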
+       register long rv;
+       register long lo;
+       register long hi;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lock, flags);
+       if (!Inited) {
+               RandomValue = jiffies;
+               Inited = 1;
+       }
+       rv = RandomValue;
+       hi = rv / 127773;
+       lo = rv % 127773;
+       rv = 16807 * lo - 2836 * hi;
+       if (rv <= 0)
+               rv += 2147483647;
+       RandomValue = rv;
+       spin_unlock_irqrestore(&lock, flags);
+       return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This does not simulate real status-packet loss, so it won't lead to an SRR */
+       if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+           == 50) {
+               *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+                   "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+       }
+#endif
+       /*
+        * It's currently not possible to simulate SRRs for FCP_WRITE without
+        * a physical link layer failure, so don't even try here.
+        */
+       if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+               return;
+
+       if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+           ((qlt_srr_random() % 100) == 20)) {
+               int i, leave = 0;
+               unsigned int tot_len = 0;
+
+               while (leave == 0)
+                       leave = qlt_srr_random() % cmd->sg_cnt;
+
+               for (i = 0; i < leave; i++)
+                       tot_len += cmd->sg[i].length;
+
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+                   "Cutting cmd %p (tag %d) buffer"
+                   " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+                   " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+                   cmd->bufflen, cmd->sg_cnt);
+
+               cmd->bufflen = tot_len;
+               cmd->sg_cnt = leave;
+       }
+
+       if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+               unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+                   "Cutting cmd %p (tag %d) buffer head "
+                   "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+                   cmd->bufflen);
+               if (offset == 0)
+                       *xmit_type &= ~QLA_TGT_XMIT_DATA;
+               else if (qlt_set_data_offset(cmd, offset)) {
+                       ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+                           "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+               }
+       }
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+       struct qla_tgt_prm *prm)
+{
+       prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+           (uint32_t)sizeof(ctio->u.status1.sense_data));
+       ctio->u.status0.flags |=
+           __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+       if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+               ctio->u.status0.flags |= __constant_cpu_to_le16(
+                   CTIO7_FLAGS_EXPLICIT_CONFORM |
+                   CTIO7_FLAGS_CONFORM_REQ);
+       }
+       ctio->u.status0.residual = cpu_to_le32(prm->residual);
+       ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+       if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+               int i;
+
+               if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+                       if (prm->cmd->se_cmd.scsi_status != 0) {
+                               ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+                                   "Skipping EXPLICIT_CONFORM and "
+                                   "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+                                   "non GOOD status\n");
+                               goto skip_explicit_conf;
+                       }
+                       ctio->u.status1.flags |= __constant_cpu_to_le16(
+                           CTIO7_FLAGS_EXPLICIT_CONFORM |
+                           CTIO7_FLAGS_CONFORM_REQ);
+               }
+skip_explicit_conf:
+               ctio->u.status1.flags &=
+                   ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+               ctio->u.status1.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+               ctio->u.status1.scsi_status |=
+                   __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+               ctio->u.status1.sense_length =
+                   cpu_to_le16(prm->sense_buffer_len);
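+               /* Sense data is copied into the IOCB as big-endian 32-bit words */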
+               for (i = 0; i < prm->sense_buffer_len/4; i++)
+                       ((uint32_t *)ctio->u.status1.sense_data)[i] =
+                               cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+               if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+                       static int q;
+                       if (q < 10) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+                                   "qla_target(%d): %d bytes of sense "
+                                   "lost", prm->tgt->ha->vp_idx,
+                                   prm->sense_buffer_len % 4);
+                               q++;
+                       }
+               }
+#endif
+       } else {
+               ctio->u.status1.flags &=
+                   ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+               ctio->u.status1.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+               ctio->u.status1.sense_length = 0;
+               memset(ctio->u.status1.sense_data, 0,
+                   sizeof(ctio->u.status1.sense_data));
+       }
+
+       /* Sense with len > 24, is it possible ??? */
+}
+
+/*
+ * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+       uint8_t scsi_status)
+{
+       struct scsi_qla_host *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct ctio7_to_24xx *pkt;
+       struct qla_tgt_prm prm;
+       uint32_t full_req_cnt = 0;
+       unsigned long flags = 0;
+       int res;
+
+       memset(&prm, 0, sizeof(prm));
+       qlt_check_srr_debug(cmd, &xmit_type);
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+           "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+           "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+           1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+       res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+           &full_req_cnt);
+       if (unlikely(res != 0)) {
+               if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+                       return 0;
+
+               return res;
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Does the F/W have enough IOCBs for this request? */
+       res = qlt_check_reserve_free_req(vha, full_req_cnt);
+       if (unlikely(res))
+               goto out_unmap_unlock;
+
+       res = qlt_24xx_build_ctio_pkt(&prm, vha);
+       if (unlikely(res != 0))
+               goto out_unmap_unlock;
+
+       pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+       if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+               pkt->u.status0.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+                       CTIO7_FLAGS_STATUS_MODE_0);
+
+               qlt_load_data_segments(&prm, vha);
+
+               if (prm.add_status_pkt == 0) {
+                       if (xmit_type & QLA_TGT_XMIT_STATUS) {
+                               pkt->u.status0.scsi_status =
+                                   cpu_to_le16(prm.rq_result);
+                               pkt->u.status0.residual =
+                                   cpu_to_le32(prm.residual);
+                               pkt->u.status0.flags |= __constant_cpu_to_le16(
+                                   CTIO7_FLAGS_SEND_STATUS);
+                               if (qlt_need_explicit_conf(ha, cmd, 0)) {
+                                       pkt->u.status0.flags |=
+                                           __constant_cpu_to_le16(
+                                               CTIO7_FLAGS_EXPLICIT_CONFORM |
+                                               CTIO7_FLAGS_CONFORM_REQ);
+                               }
+                       }
+
+               } else {
+                       /*
+                        * We have already made sure that there is sufficient
+                        * amount of request entries to not drop HW lock in
+                        * req_pkt().
+                        */
+                       struct ctio7_to_24xx *ctio =
+                               (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+                       ql_dbg(ql_dbg_tgt, vha, 0xe019,
+                           "Building additional status packet\n");
+
+                       memcpy(ctio, pkt, sizeof(*ctio));
+                       ctio->entry_count = 1;
+                       ctio->dseg_count = 0;
+                       ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+                           CTIO7_FLAGS_DATA_IN);
+
+                       /* Real finish is ctio_m1's finish */
+                       pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+                       pkt->u.status0.flags |= __constant_cpu_to_le16(
+                           CTIO7_FLAGS_DONT_RET_CTIO);
+                       qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+                           &prm);
+                       pr_debug("Status CTIO7: %p\n", ctio);
+               }
+       } else
+               qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+       cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+           "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+           pkt, scsi_status);
+
+       qla2x00_start_iocbs(vha, vha->req);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return 0;
+
+out_unmap_unlock:
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+       struct ctio7_to_24xx *pkt;
+       struct scsi_qla_host *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = cmd->tgt;
+       struct qla_tgt_prm prm;
+       unsigned long flags;
+       int res = 0;
+
+       memset(&prm, 0, sizeof(prm));
+       prm.cmd = cmd;
+       prm.tgt = tgt;
+       prm.sg = NULL;
+       prm.req_cnt = 1;
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+               return -EIO;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+           (int)vha->vp_idx);
+
+       /* Calculate number of entries and segments required */
+       if (qlt_pci_map_calc_cnt(&prm) != 0)
+               return -EAGAIN;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Does the F/W have enough IOCBs for this request? */
+       res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+       if (res != 0)
+               goto out_unlock_free_unmap;
+
+       res = qlt_24xx_build_ctio_pkt(&prm, vha);
+       if (unlikely(res != 0))
+               goto out_unlock_free_unmap;
+       pkt = (struct ctio7_to_24xx *)prm.pkt;
+       pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+           CTIO7_FLAGS_STATUS_MODE_0);
+       qlt_load_data_segments(&prm, vha);
+
+       cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+
+out_unlock_free_unmap:
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd,
+       struct atio_from_isp *atio)
+{
+       struct ctio7_to_24xx *ctio24;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       int ret = 0;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (pkt == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe050,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+
+       if (cmd != NULL) {
+               if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe051,
+                           "qla_target(%d): Terminating cmd %p with "
+                           "incorrect state %d\n", vha->vp_idx, cmd,
+                           cmd->state);
+               } else
+                       ret = 1;
+       }
+
+       pkt->entry_count = 1;
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+       ctio24 = (struct ctio7_to_24xx *)pkt;
+       ctio24->entry_type = CTIO_TYPE7;
+       ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+       ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio24->vp_index = vha->vp_idx;
+       ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+           __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+               CTIO7_FLAGS_TERMINATE);
+       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+       /* Most likely, it isn't needed */
+       ctio24->u.status1.residual = get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]);
+       if (ctio24->u.status1.residual != 0)
+               ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+       unsigned long flags;
+       int rc;
+
+       if (qlt_issue_marker(vha, ha_locked) < 0)
+               return;
+
+       if (ha_locked) {
+               rc = __qlt_send_term_exchange(vha, cmd, atio);
+               goto done;
+       }
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       rc = __qlt_send_term_exchange(vha, cmd, atio);
+       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+done:
+       if (rc == 1) {
+               if (!ha_locked && !in_interrupt())
+                       msleep(250); /* just in case */
+
+               vha->hw->tgt.tgt_ops->free_cmd(cmd);
+       }
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+       BUG_ON(cmd->sg_mapped);
+
+       if (unlikely(cmd->free_sg))
+               kfree(cmd->sg);
+       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd, void *ctio)
+{
+       struct qla_tgt_srr_ctio *sc;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_srr_imm *imm;
+
+       tgt->ctio_srr_id++;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+           "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+       if (!ctio) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+                   "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+                   vha->vp_idx);
+               return -EINVAL;
+       }
+
+       sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+       if (sc != NULL) {
+               sc->cmd = cmd;
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               sc->srr_id = tgt->ctio_srr_id;
+               list_add_tail(&sc->srr_list_entry,
+                   &tgt->srr_ctio_list);
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+                   "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
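+               /*
+                * If the matching immediate notify SRR has already arrived
+                * (imm_srr_id has caught up with ctio_srr_id), pair it with
+                * this CTIO SRR and schedule the SRR work; if no matching
+                * IMM is found, this CTIO SRR is dropped instead.
+                */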
+               if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+                       int found = 0;
+                       list_for_each_entry(imm, &tgt->srr_imm_list,
+                           srr_list_entry) {
+                               if (imm->srr_id == sc->srr_id) {
+                                       found = 1;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+                                   "Scheduling srr work\n");
+                               schedule_work(&tgt->srr_work);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+                                   "qla_target(%d): imm_srr_id "
+                                   "== ctio_srr_id (%d), but there is no "
+                                   "corresponding SRR IMM, deleting CTIO "
+                                   "SRR %p\n", vha->vp_idx,
+                                   tgt->ctio_srr_id, sc);
+                               list_del(&sc->srr_list_entry);
+                               spin_unlock(&tgt->srr_lock);
+
+                               kfree(sc);
+                               return -EINVAL;
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+       } else {
+               struct qla_tgt_srr_imm *ti;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+                   "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+                   vha->vp_idx);
+               spin_lock(&tgt->srr_lock);
+               list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+                   srr_list_entry) {
+                       if (imm->srr_id == tgt->ctio_srr_id) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+                                   "IMM SRR %p deleted (id %d)\n",
+                                   imm, imm->srr_id);
+                               list_del(&imm->srr_list_entry);
+                               qlt_reject_free_srr_imm(vha, imm, 1);
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+       struct qla_tgt_cmd *cmd, uint32_t status)
+{
+       int term = 0;
+
+       if (ctio != NULL) {
+               struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+               term = !(c->flags &
+                   __constant_cpu_to_le16(OF_TERM_EXCH));
+       } else
+               term = 1;
+
+       if (term)
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+       return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+       uint32_t handle)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       handle--;
+       if (ha->tgt.cmds[handle] != NULL) {
+               struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+               ha->tgt.cmds[handle] = NULL;
+               return cmd;
+       } else
+               return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+       uint32_t handle, void *ctio)
+{
+       struct qla_tgt_cmd *cmd = NULL;
+
+       /* Clear out internal marks */
+       handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+           CTIO_INTERMEDIATE_HANDLE_MARK);
+
+       if (handle != QLA_TGT_NULL_HANDLE) {
+               if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+                           "SKIP_HANDLE CTIO\n");
+                       return NULL;
+               }
+               /* handle-1 is actually used */
+               if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe052,
+                           "qla_target(%d): Wrong handle %x received\n",
+                           vha->vp_idx, handle);
+                       return NULL;
+               }
+               cmd = qlt_get_cmd(vha, handle);
+               if (unlikely(cmd == NULL)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe053,
+                           "qla_target(%d): Suspicious: unable to "
+                           "find the command with handle %x\n", vha->vp_idx,
+                           handle);
+                       return NULL;
+               }
+       } else if (ctio != NULL) {
+               /* We can't get loop ID from CTIO7 */
+               ql_dbg(ql_dbg_tgt, vha, 0xe054,
+                   "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+                   "support NULL handles\n", vha->vp_idx);
+               return NULL;
+       }
+
+       return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+       uint32_t status, void *ctio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct se_cmd *se_cmd;
+       struct target_core_fabric_ops *tfo;
+       struct qla_tgt_cmd *cmd;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+           "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+           vha->vp_idx, ctio, status, handle);
+
+       if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+               /* That could happen only in case of an error/reset/abort */
+               if (status != CTIO_SUCCESS) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+                           "Intermediate CTIO received"
+                           " (status %x)\n", status);
+               }
+               return;
+       }
+
+       cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+       if (cmd == NULL)
+               return;
+
+       se_cmd = &cmd->se_cmd;
+       tfo = se_cmd->se_tfo;
+
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+
+       if (unlikely(status != CTIO_SUCCESS)) {
+               switch (status & 0xFFFF) {
+               case CTIO_LIP_RESET:
+               case CTIO_TARGET_RESET:
+               case CTIO_ABORTED:
+               case CTIO_TIMEOUT:
+               case CTIO_INVALID_RX_ID:
+                       /* They are OK */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+                           "qla_target(%d): CTIO with "
+                           "status %#x received, state %x, se_cmd %p, "
+                           "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+                           "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+                           status, cmd->state, se_cmd);
+                       break;
+
+               case CTIO_PORT_LOGGED_OUT:
+               case CTIO_PORT_UNAVAILABLE:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+                           "qla_target(%d): CTIO with PORT LOGGED "
+                           "OUT (29) or PORT UNAVAILABLE (28) status %x "
+                           "received (state %x, se_cmd %p)\n", vha->vp_idx,
+                           status, cmd->state, se_cmd);
+                       break;
+
+               case CTIO_SRR_RECEIVED:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+                           "qla_target(%d): CTIO with SRR_RECEIVED"
+                           " status %x received (state %x, se_cmd %p)\n",
+                           vha->vp_idx, status, cmd->state, se_cmd);
+                       if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+                               break;
+                       else
+                               return;
+
+               default:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+                           "qla_target(%d): CTIO with error status "
+                           "0x%x received (state %x, se_cmd %p)\n",
+                           vha->vp_idx, status, cmd->state, se_cmd);
+                       break;
+               }
+
+               if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+                       if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+                               return;
+       }
+
+       if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+       } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+               int rx_status = 0;
+
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+
+               if (unlikely(status != CTIO_SUCCESS))
+                       rx_status = -EIO;
+               else
+                       cmd->write_data_transferred = 1;
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe020,
+                   "Data received, context %x, rx_status %d\n",
+                   0x0, rx_status);
+
+               ha->tgt.tgt_ops->handle_data(cmd);
+               return;
+       } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+                   "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+       } else {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+                   "qla_target(%d): A command in state (%d) should "
+                   "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+       }
+
+       if (unlikely(status != CTIO_SUCCESS)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+               dump_stack();
+       }
+
+       ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+       if (likely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe021,
+                   "CTIO, but target mode not enabled"
+                   " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+               return;
+       }
+
+       tgt->irq_cmd_count++;
+       qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+       tgt->irq_cmd_count--;
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+       uint8_t task_codes)
+{
+       int fcp_task_attr;
+
+       switch (task_codes) {
+       case ATIO_SIMPLE_QUEUE:
+               fcp_task_attr = MSG_SIMPLE_TAG;
+               break;
+       case ATIO_HEAD_OF_QUEUE:
+               fcp_task_attr = MSG_HEAD_TAG;
+               break;
+       case ATIO_ORDERED_QUEUE:
+               fcp_task_attr = MSG_ORDERED_TAG;
+               break;
+       case ATIO_ACA_QUEUE:
+               fcp_task_attr = MSG_ACA_TAG;
+               break;
+       case ATIO_UNTAGGED:
+               fcp_task_attr = MSG_SIMPLE_TAG;
+               break;
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+                   "qla_target: unknown task code %x, use ORDERED instead\n",
+                   task_codes);
+               fcp_task_attr = MSG_ORDERED_TAG;
+               break;
+       }
+
+       return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+                                       uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       scsi_qla_host_t *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess = NULL;
+       struct atio_from_isp *atio = &cmd->atio;
+       unsigned char *cdb;
+       unsigned long flags;
+       uint32_t data_length;
+       int ret, fcp_task_attr, data_dir, bidi = 0;
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           atio->u.isp24.fcp_hdr.s_id);
+       if (sess) {
+               if (unlikely(sess->tearing_down)) {
+                       sess = NULL;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       goto out_term;
+               } else {
+                       /*
+                        * Do the extra kref_get() before dropping
+                        * qla_hw_data->hardware_lock.
+                        */
+                       kref_get(&sess->se_sess->sess_kref);
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       if (unlikely(!sess)) {
+               uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+                       "qla_target(%d): Unable to find wwn login"
+                       " (s_id %x:%x:%x), trying to create it manually\n",
+                       vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+               if (atio->u.raw.entry_count > 1) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+                               "Dropping multi-entry cmd %p\n", cmd);
+                       goto out_term;
+               }
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has an extra creation ref. */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               if (!sess)
+                       goto out_term;
+       }
+
+       cmd->sess = sess;
+       cmd->loop_id = sess->loop_id;
+       cmd->conf_compl_supported = sess->conf_compl_supported;
+
+       cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+       cmd->tag = atio->u.isp24.exchange_addr;
+       cmd->unpacked_lun = scsilun_to_int(
+           (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
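+       /*
+        * Derive the DMA direction from the FCP_CMND read/write bits; both
+        * bits set indicates a bidirectional command, handled here as a
+        * write (DMA_TO_DEVICE) with bidi flagged.
+        */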
+       if (atio->u.isp24.fcp_cmnd.rddata &&
+           atio->u.isp24.fcp_cmnd.wrdata) {
+               bidi = 1;
+               data_dir = DMA_TO_DEVICE;
+       } else if (atio->u.isp24.fcp_cmnd.rddata)
+               data_dir = DMA_FROM_DEVICE;
+       else if (atio->u.isp24.fcp_cmnd.wrdata)
+               data_dir = DMA_TO_DEVICE;
+       else
+               data_dir = DMA_NONE;
+
+       fcp_task_attr = qlt_get_fcp_task_attr(vha,
+           atio->u.isp24.fcp_cmnd.task_attr);
+       data_length = be32_to_cpu(get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe022,
+           "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+           cmd, cmd->unpacked_lun, cmd->tag);
+
+       ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+           fcp_task_attr, data_dir, bidi);
+       if (ret != 0)
+               goto out_term;
+       /*
+        * Drop extra session reference from qla_tgt_handle_cmd_for_atio*().
+        */
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+       /*
+        * cmd has not been sent to the target yet, so pass NULL as the second
+        * argument to qlt_send_term_exchange() and free the memory here.
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_cmd *cmd;
+
+       if (unlikely(tgt->tgt_stop)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+                   "New command while device %p is shutting down\n", tgt);
+               return -EFAULT;
+       }
+
+       cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+       if (!cmd) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+                   "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&cmd->cmd_list);
+
+       memcpy(&cmd->atio, atio, sizeof(*atio));
+       cmd->state = QLA_TGT_STATE_NEW;
+       cmd->tgt = ha->tgt.qla_tgt;
+       cmd->vha = vha;
+
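+       /*
+        * hardware_lock is held here, so defer the heavy lifting to
+        * qlt_do_work() running from qla_tgt_wq in thread context.
+        */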
+       INIT_WORK(&cmd->work, qlt_do_work);
+       queue_work(qla_tgt_wq, &cmd->work);
+       return 0;
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+       int fn, void *iocb, int flags)
+{
+       struct scsi_qla_host *vha = sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       int res;
+       uint8_t tmr_func;
+
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (!mcmd) {
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+                   "qla_target(%d): Allocation of management "
+                   "command failed, some commands and their data could "
+                   "leak\n", vha->vp_idx);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+       mcmd->sess = sess;
+
+       if (iocb) {
+               memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+                   sizeof(mcmd->orig_iocb.imm_ntfy));
+       }
+       mcmd->tmr_func = fn;
+       mcmd->flags = flags;
+
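+       /*
+        * Translate the qla_target task management code into the generic
+        * TMR_* value expected by tgt_ops->handle_tmr().
+        */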
+       switch (fn) {
+       case QLA_TGT_CLEAR_ACA:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+                   "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+               tmr_func = TMR_CLEAR_ACA;
+               break;
+
+       case QLA_TGT_TARGET_RESET:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+                   "qla_target(%d): TARGET_RESET received\n",
+                   sess->vha->vp_idx);
+               tmr_func = TMR_TARGET_WARM_RESET;
+               break;
+
+       case QLA_TGT_LUN_RESET:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+                   "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+               tmr_func = TMR_LUN_RESET;
+               break;
+
+       case QLA_TGT_CLEAR_TS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+                   "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+               tmr_func = TMR_CLEAR_TASK_SET;
+               break;
+
+       case QLA_TGT_ABORT_TS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+                   "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+               tmr_func = TMR_ABORT_TASK_SET;
+               break;
+#if 0
+       case QLA_TGT_ABORT_ALL:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+                   "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_ABORT_ALL_SESS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+                   "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_NEXUS_LOSS_SESS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+                   "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_NEXUS_LOSS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+                   "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+#endif
+       default:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+                   "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+                   sess->vha->vp_idx, fn);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -ENOSYS;
+       }
+
+       res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+       if (res != 0) {
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+                   "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+                   sess->vha->vp_idx, res);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt;
+       struct qla_tgt_sess *sess;
+       uint32_t lun, unpacked_lun;
+       int lun_size, fn;
+
+       tgt = ha->tgt.qla_tgt;
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+       fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           a->u.isp24.fcp_hdr.s_id);
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       if (!sess) {
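+               /*
+                * No session for this S_ID yet: defer the TMF to the
+                * sess_work thread, which can create the session outside of
+                * hardware_lock.
+                */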
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+                   "qla_target(%d): task mgmt fn 0x%x for "
+                   "non-existent session\n", vha->vp_idx, fn);
+               return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+                   sizeof(struct atio_from_isp));
+       }
+
+       return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       uint32_t lun, unpacked_lun;
+       int rc;
+
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (mcmd == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+                   "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+                   vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+
+       mcmd->sess = sess;
+       memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+           sizeof(mcmd->orig_iocb.imm_ntfy));
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+           le16_to_cpu(iocb->u.isp2x.seq_id));
+       if (rc != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+                   "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+                   vha->vp_idx, rc);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       int loop_id;
+
+       loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+       sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+       if (sess == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+                   "qla_target(%d): task abort for non-existent "
+                   "session\n", vha->vp_idx);
+               return qlt_sched_sess_work(ha->tgt.qla_tgt,
+                   QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+       }
+
+       return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       int res = 0;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+           "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+           " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+           iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+           iocb->u.isp24.status_subcode);
+
+       switch (iocb->u.isp24.status_subcode) {
+       case ELS_PLOGI:
+       case ELS_FLOGI:
+       case ELS_PRLI:
+       case ELS_LOGO:
+       case ELS_PRLO:
+               res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+               break;
+       case ELS_PDISC:
+       case ELS_ADISC:
+       {
+               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               if (tgt->link_reinit_iocb_pending) {
+                       qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+                       tgt->link_reinit_iocb_pending = 0;
+               }
+               res = 1; /* send notify ack */
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+                   "qla_target(%d): Unsupported ELS command %x "
+                   "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+               res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+               break;
+       }
+
+       return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+       struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+       size_t first_offset = 0, rem_offset = offset, tmp = 0;
+       int i, sg_srr_cnt, bufflen = 0;
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+           "Entering qlt_set_data_offset: cmd: %p, cmd->sg: %p, "
+           "cmd->sg_cnt: %u, direction: %d\n",
+           cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+       /*
+        * FIXME: Reject non-zero SRR relative offset until we can test
+        * this code properly.
+        */
+       pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
+       return -1;
+
+       if (!cmd->sg || !cmd->sg_cnt) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+                   "Missing cmd->sg or zero cmd->sg_cnt in"
+                   " qlt_set_data_offset\n");
+               return -EINVAL;
+       }
+       /*
+        * Walk the current cmd->sg list until we locate the new sg_srr_start
+        */
+       for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+                   "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+                   i, sg, sg_page(sg), sg->length, sg->offset);
+
+               if ((sg->length + tmp) > offset) {
+                       first_offset = rem_offset;
+                       sg_srr_start = sg;
+                       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+                           "Found matching sg[%d], using %p as sg_srr_start, "
+                           "and using first_offset: %zu\n", i, sg,
+                           first_offset);
+                       break;
+               }
+               tmp += sg->length;
+               rem_offset -= sg->length;
+       }
+
+       if (!sg_srr_start) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+                   "Unable to locate sg_srr_start for offset: %u\n", offset);
+               return -EINVAL;
+       }
+       sg_srr_cnt = (cmd->sg_cnt - i);
+
+       sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+       if (!sg_srr) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+                   "Unable to allocate sgp\n");
+               return -ENOMEM;
+       }
+       sg_init_table(sg_srr, sg_srr_cnt);
+       sgp = &sg_srr[0];
+       /*
+        * Walk the remaining list for sg_srr_start, mapping to the newly
+        * allocated sg_srr taking first_offset into account.
+        */
+       for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+               if (first_offset) {
+                       sg_set_page(sgp, sg_page(sg),
+                           (sg->length - first_offset), first_offset);
+                       first_offset = 0;
+               } else {
+                       sg_set_page(sgp, sg_page(sg), sg->length, 0);
+               }
+               bufflen += sgp->length;
+
+               sgp = sg_next(sgp);
+               if (!sgp)
+                       break;
+       }
+
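+       /*
+        * Switch the command to the offset-adjusted SG list; free_sg marks
+        * the replacement list so it gets kfree()d later instead of the
+        * original se_cmd SGL.
+        */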
+       cmd->sg = sg_srr;
+       cmd->sg_cnt = sg_srr_cnt;
+       cmd->bufflen = bufflen;
+       cmd->offset += offset;
+       cmd->free_sg = 1;
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+           cmd->sg_cnt);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+           cmd->bufflen);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+           cmd->offset);
+
+       BUG_ON(cmd->sg_cnt < 0);
+       BUG_ON(cmd->bufflen < 0);
+
+       return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+       uint32_t srr_rel_offs, int *xmit_type)
+{
+       int res = 0, rel_offs;
+
+       rel_offs = srr_rel_offs - cmd->offset;
+       ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+           srr_rel_offs, rel_offs);
+
+       *xmit_type = QLA_TGT_XMIT_ALL;
+
+       if (rel_offs < 0) {
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+                   "qla_target(%d): SRR rel_offs (%d) < 0",
+                   cmd->vha->vp_idx, rel_offs);
+               res = -1;
+       } else if (rel_offs == cmd->bufflen)
+               *xmit_type = QLA_TGT_XMIT_STATUS;
+       else if (rel_offs > 0)
+               res = qlt_set_data_offset(cmd, rel_offs);
+
+       return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+       struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+       struct imm_ntfy_from_isp *ntfy =
+           (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_cmd *cmd = sctio->cmd;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       unsigned long flags;
+       int xmit_type = 0, resp = 0;
+       uint32_t offset;
+       uint16_t srr_ui;
+
+       offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+       srr_ui = ntfy->u.isp24.srr_ui;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+           cmd, srr_ui);
+
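+       /*
+        * srr_ui selects what the initiator wants replayed: status only,
+        * data-in (re-send read data) or data-out (re-request write data).
+        */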
+       switch (srr_ui) {
+       case SRR_IU_STATUS:
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               qlt_send_notify_ack(vha, ntfy,
+                   0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               xmit_type = QLA_TGT_XMIT_STATUS;
+               resp = 1;
+               break;
+       case SRR_IU_DATA_IN:
+               if (!cmd->sg || !cmd->sg_cnt) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+                           "Unable to process SRR_IU_DATA_IN due to"
+                           " missing cmd->sg, state: %d\n", cmd->state);
+                       dump_stack();
+                       goto out_reject;
+               }
+               if (se_cmd->scsi_status != 0) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+                           "Rejecting SRR_IU_DATA_IN with non-GOOD "
+                           "scsi_status\n");
+                       goto out_reject;
+               }
+               cmd->bufflen = se_cmd->data_length;
+
+               if (qlt_has_data(cmd)) {
+                       if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+                               goto out_reject;
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       qlt_send_notify_ack(vha, ntfy,
+                           0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       resp = 1;
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+                           "qla_target(%d): SRR for data-in on a cmd "
+                           "with no data (tag %d, SCSI status %d), "
+                           "reject", vha->vp_idx, cmd->tag,
+                           cmd->se_cmd.scsi_status);
+                       goto out_reject;
+               }
+               break;
+       case SRR_IU_DATA_OUT:
+               if (!cmd->sg || !cmd->sg_cnt) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+                           "Unable to process SRR_IU_DATA_OUT due to"
+                           " missing cmd->sg\n");
+                       dump_stack();
+                       goto out_reject;
+               }
+               if (se_cmd->scsi_status != 0) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+                           "Rejecting SRR_IU_DATA_OUT"
+                           " with non-GOOD scsi_status\n");
+                       goto out_reject;
+               }
+               cmd->bufflen = se_cmd->data_length;
+
+               if (qlt_has_data(cmd)) {
+                       if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+                               goto out_reject;
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       qlt_send_notify_ack(vha, ntfy,
+                           0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       if (xmit_type & QLA_TGT_XMIT_DATA)
+                               qlt_rdy_to_xfer(cmd);
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+                           "qla_target(%d): SRR for data-out on a cmd "
+                           "with no data (tag %d, SCSI status %d), "
+                           "reject", vha->vp_idx, cmd->tag,
+                           cmd->se_cmd.scsi_status);
+                       goto out_reject;
+               }
+               break;
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+                   "qla_target(%d): Unknown srr_ui value %x",
+                   vha->vp_idx, srr_ui);
+               goto out_reject;
+       }
+
+       /* Transmit response in case of status and data-in cases */
+       if (resp)
+               qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+       return;
+
+out_reject:
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+       if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+               dump_stack();
+       } else
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+       struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+
+       if (!ha_locked)
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+       if (!ha_locked)
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_tgt_srr_ctio *sctio;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+           tgt);
+
+restart:
+       spin_lock_irqsave(&tgt->srr_lock, flags);
+       list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+               struct qla_tgt_srr_imm *imm, *i, *ti;
+               struct qla_tgt_cmd *cmd;
+               struct se_cmd *se_cmd;
+
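+               /*
+                * Pair this CTIO SRR with the immediate notify SRR carrying
+                * the same srr_id; any duplicate IMM entries are rejected
+                * and freed.
+                */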
+               imm = NULL;
+               list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+                                               srr_list_entry) {
+                       if (i->srr_id == sctio->srr_id) {
+                               list_del(&i->srr_list_entry);
+                               if (imm) {
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+                                         "qla_target(%d): There must be "
+                                         "only one IMM SRR per CTIO SRR "
+                                         "(IMM SRR %p, id %d, CTIO %p)\n",
+                                         vha->vp_idx, i, i->srr_id, sctio);
+                                       qlt_reject_free_srr_imm(tgt->vha, i, 0);
+                               } else
+                                       imm = i;
+                       }
+               }
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+                   "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+                   sctio->srr_id);
+
+               if (imm == NULL) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+                           "No matching IMM found for SRR CTIO (id %d)\n",
+                           sctio->srr_id);
+                       continue;
+               } else
+                       list_del(&sctio->srr_list_entry);
+
+               spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+               cmd = sctio->cmd;
+               /*
+                * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+                * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+                * logic.
+                */
+               cmd->offset = 0;
+               if (cmd->free_sg) {
+                       kfree(cmd->sg);
+                       cmd->sg = NULL;
+                       cmd->free_sg = 0;
+               }
+               se_cmd = &cmd->se_cmd;
+
+               cmd->sg_cnt = se_cmd->t_data_nents;
+               cmd->sg = se_cmd->t_data_sg;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+                   "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+                   "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+                   se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+               qlt_handle_srr(vha, sctio, imm);
+
+               kfree(imm);
+               kfree(sctio);
+               goto restart;
+       }
+       spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_tgt_srr_imm *imm;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_srr_ctio *sctio;
+
+       tgt->imm_srr_id++;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+           vha->vp_idx);
+
+       imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+       if (imm != NULL) {
+               memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               imm->srr_id = tgt->imm_srr_id;
+               list_add_tail(&imm->srr_list_entry,
+                   &tgt->srr_imm_list);
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+                   "IMM NTFY SRR %p added (id %d, ui %x)\n",
+                   imm, imm->srr_id, iocb->u.isp24.srr_ui);
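+               /*
+                * If the corresponding CTIO SRR has already arrived, either
+                * schedule the SRR work or, when no matching CTIO SRR can be
+                * found, drop this IMM and reject the SRR.
+                */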
+               if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+                       int found = 0;
+                       list_for_each_entry(sctio, &tgt->srr_ctio_list,
+                           srr_list_entry) {
+                               if (sctio->srr_id == imm->srr_id) {
+                                       found = 1;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+                                   "Scheduling srr work\n");
+                               schedule_work(&tgt->srr_work);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+                                   "qla_target(%d): imm_srr_id "
+                                   "== ctio_srr_id (%d), but there is no "
+                                   "corresponding SRR CTIO, deleting IMM "
+                                   "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+                                   imm);
+                               list_del(&imm->srr_list_entry);
+
+                               kfree(imm);
+
+                               spin_unlock(&tgt->srr_lock);
+                               goto out_reject;
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+       } else {
+               struct qla_tgt_srr_ctio *ts;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+                   "qla_target(%d): Unable to allocate SRR IMM "
+                   "entry, SRR request will be rejected\n", vha->vp_idx);
+
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+                   srr_list_entry) {
+                       if (sctio->srr_id == tgt->imm_srr_id) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+                                   "CTIO SRR %p deleted (id %d)\n",
+                                   sctio, sctio->srr_id);
+                               list_del(&sctio->srr_list_entry);
+                               qlt_send_term_exchange(vha, sctio->cmd,
+                                   &sctio->cmd->atio, 1);
+                               kfree(sctio);
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+               goto out_reject;
+       }
+
+       return;
+
+out_reject:
+       qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t add_flags = 0;
+       int send_notify_ack = 1;
+       uint16_t status;
+
+       status = le16_to_cpu(iocb->u.isp2x.status);
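+       /*
+        * Unless a handler below takes over the response itself, the
+        * immediate notify is answered with a NOTIFY_ACK at the end.
+        */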
+       switch (status) {
+       case IMM_NTFY_LIP_RESET:
+       {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+                   "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+                   vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+
+               if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+                       send_notify_ack = 0;
+               break;
+       }
+
+       case IMM_NTFY_LIP_LINK_REINIT:
+       {
+               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+                   "qla_target(%d): LINK REINIT (loop %#x, "
+                   "subcode %x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+               if (tgt->link_reinit_iocb_pending) {
+                       qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+               }
+               memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+               tgt->link_reinit_iocb_pending = 1;
+               /*
+                * QLogic requires waiting after LINK REINIT for possible
+                * PDISC or ADISC ELS commands.
+                */
+               send_notify_ack = 0;
+               break;
+       }
+
+       case IMM_NTFY_PORT_LOGOUT:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+                   "qla_target(%d): Port logout (loop "
+                   "%#x, subcode %x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_GLBL_TPRLO:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+                   "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_PORT_CONFIG:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+                   "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+                   status);
+               if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_GLBL_LOGO:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+                   "qla_target(%d): Link failure detected\n",
+                   vha->vp_idx);
+               /* I_T nexus loss */
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_IOCB_OVERFLOW:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+                   "qla_target(%d): Cannot provide requested "
+                   "capability (IOCB overflowed the immediate notify "
+                   "resource count)\n", vha->vp_idx);
+               break;
+
+       case IMM_NTFY_ABORT_TASK:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+                   "qla_target(%d): Abort Task (S %08x I %#x -> "
+                   "L %#x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp2x.seq_id),
+                   GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+                   le16_to_cpu(iocb->u.isp2x.lun));
+               if (qlt_abort_task(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_RESOURCE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+                   "qla_target(%d): Out of resources, host %ld\n",
+                   vha->vp_idx, vha->host_no);
+               break;
+
+       case IMM_NTFY_MSG_RX:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+                   "qla_target(%d): Immediate notify task %x\n",
+                   vha->vp_idx, iocb->u.isp2x.task_flags);
+               if (qlt_handle_task_mgmt(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_ELS:
+               if (qlt_24xx_handle_els(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_SRR:
+               qlt_prepare_srr_imm(vha, iocb);
+               send_notify_ack = 0;
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+                   "qla_target(%d): Received unknown immediate "
+                   "notify status %x\n", vha->vp_idx, status);
+               break;
+       }
+
+       if (send_notify_ack)
+               qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio, uint16_t status)
+{
+       struct ctio7_to_24xx *ctio24;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       struct qla_tgt_sess *sess = NULL;
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           atio->u.isp24.fcp_hdr.s_id);
+       if (!sess) {
+               qlt_send_term_exchange(vha, NULL, atio, 1);
+               return;
+       }
+       /* Sending marker isn't necessary, since we called from ISR */
+
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!pkt) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet", vha->vp_idx, __func__);
+               return;
+       }
+
+       pkt->entry_count = 1;
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
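+       /*
+        * Build a status-only CTIO7 that returns the requested SCSI status
+        * (e.g. BUSY or TASK SET FULL) to the initiator without moving data.
+        */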
+       ctio24 = (struct ctio7_to_24xx *)pkt;
+       ctio24->entry_type = CTIO_TYPE7;
+       ctio24->nport_handle = sess->loop_id;
+       ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio24->vp_index = vha->vp_idx;
+       ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+           __constant_cpu_to_le16(
+               CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+               CTIO7_FLAGS_DONT_RET_CTIO);
+       /*
+        * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
+        * if explicit confirmation is used.
+        */
+       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       ctio24->u.status1.scsi_status = cpu_to_le16(status);
+       ctio24->u.status1.residual = get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]);
+       if (ctio24->u.status1.residual != 0)
+               ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       int rc;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+                   "ATIO pkt, but no tgt (ha %p)", ha);
+               return;
+       }
+       ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+           "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+           vha->vp_idx, atio, atio->u.raw.entry_type,
+           atio->u.raw.entry_count);
+       /*
+        * In tgt_stop mode we should still allow all requests to pass.
+        * Otherwise, some commands can get stuck.
+        */
+
+       tgt->irq_cmd_count++;
+
+       switch (atio->u.raw.entry_type) {
+       case ATIO_TYPE7:
+               ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+                   "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+                   "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+                   vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+                   atio->u.isp24.fcp_cmnd.rddata,
+                   atio->u.isp24.fcp_cmnd.wrdata,
+                   atio->u.isp24.fcp_cmnd.add_cdb_len,
+                   be32_to_cpu(get_unaligned((uint32_t *)
+                       &atio->u.isp24.fcp_cmnd.add_cdb[
+                       atio->u.isp24.fcp_cmnd.add_cdb_len])),
+                   atio->u.isp24.fcp_hdr.s_id[0],
+                   atio->u.isp24.fcp_hdr.s_id[1],
+                   atio->u.isp24.fcp_hdr.s_id[2]);
+
+               if (unlikely(atio->u.isp24.exchange_addr ==
+                   ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe058,
+                           "qla_target(%d): ATIO_TYPE7 "
+                           "received with UNKNOWN exchange address, "
+                           "sending QUEUE_FULL\n", vha->vp_idx);
+                       qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+                       break;
+               }
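+               /*
+                * A non-zero task_mgmt_flags field means this ATIO carries a
+                * task management function instead of a SCSI command.
+                */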
+               if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+                       rc = qlt_handle_cmd_for_atio(vha, atio);
+               else
+                       rc = qlt_handle_task_mgmt(vha, atio);
+               if (unlikely(rc != 0)) {
+                       if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+                               qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+                               qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+                       } else {
+                               if (tgt->tgt_stop) {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe059,
+                                           "qla_target: Unable to send "
+                                           "command to target for req, "
+                                           "ignoring.\n");
+                               } else {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+                                           "qla_target(%d): Unable to send "
+                                           "command to target, sending BUSY "
+                                           "status.\n", vha->vp_idx);
+                                       qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+                               }
+                       }
+               }
+               break;
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               if (unlikely(atio->u.isp2x.entry_status != 0)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+                           "qla_target(%d): Received ATIO packet %x "
+                           "with error status %x\n", vha->vp_idx,
+                           atio->u.raw.entry_type,
+                           atio->u.isp2x.entry_status);
+                       break;
+               }
+               ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+               qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+                   "qla_target(%d): Received unknown ATIO packet "
+                   "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+                   "qla_target(%d): Response pkt %x received, but no "
+                   "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+           "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+           "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+           pkt->entry_count, pkt->entry_status, pkt->handle);
+
+       /*
+        * In tgt_stop mode we should still allow all requests to pass.
+        * Otherwise, some commands can get stuck.
+        */
+
+       tgt->irq_cmd_count++;
+
+       switch (pkt->entry_type) {
+       case CTIO_TYPE7:
+       {
+               struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+                   vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case ACCEPT_TGT_IO_TYPE:
+       {
+               struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+               int rc;
+               ql_dbg(ql_dbg_tgt, vha, 0xe031,
+                   "ACCEPT_TGT_IO instance %d status %04x "
+                   "lun %04x read/write %d data_length %04x "
+                   "target_id %02x rx_id %04x\n", vha->vp_idx,
+                   le16_to_cpu(atio->u.isp2x.status),
+                   le16_to_cpu(atio->u.isp2x.lun),
+                   atio->u.isp2x.execution_codes,
+                   le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+                   atio), atio->u.isp2x.rx_id);
+               if (atio->u.isp2x.status !=
+                   __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+                           "qla_target(%d): ATIO with error "
+                           "status %x received\n", vha->vp_idx,
+                           le16_to_cpu(atio->u.isp2x.status));
+                       break;
+               }
+               ql_dbg(ql_dbg_tgt, vha, 0xe032,
+                   "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+                   atio->u.isp2x.cdb[0], (unsigned long
+                   int)sizeof(atio->u.isp2x.cdb));
+
+               rc = qlt_handle_cmd_for_atio(vha, atio);
+               if (unlikely(rc != 0)) {
+                       if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+                               qlt_send_busy(vha, atio, 0);
+#else
+                               qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+                       } else {
+                               if (tgt->tgt_stop) {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+                                           "qla_target: Unable to send "
+                                           "command to target, sending TERM "
+                                           "EXCHANGE for rsp\n");
+                                       qlt_send_term_exchange(vha, NULL,
+                                           atio, 1);
+                               } else {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe060,
+                                           "qla_target(%d): Unable to send "
+                                           "command to target, sending BUSY "
+                                           "status\n", vha->vp_idx);
+                                       qlt_send_busy(vha, atio, 0);
+                               }
+                       }
+               }
+       }
+       break;
+
+       case CONTINUE_TGT_IO_TYPE:
+       {
+               struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe033,
+                   "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case CTIO_A64_TYPE:
+       {
+               struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+                   vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+               ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+               qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+               break;
+
+       case NOTIFY_ACK_TYPE:
+               if (tgt->notify_ack_expected > 0) {
+                       struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+                       ql_dbg(ql_dbg_tgt, vha, 0xe036,
+                           "NOTIFY_ACK seq %08x status %x\n",
+                           le16_to_cpu(entry->u.isp2x.seq_id),
+                           le16_to_cpu(entry->u.isp2x.status));
+                       tgt->notify_ack_expected--;
+                       if (entry->u.isp2x.status !=
+                           __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe061,
+                                   "qla_target(%d): NOTIFY_ACK "
+                                   "failed %x\n", vha->vp_idx,
+                                   le16_to_cpu(entry->u.isp2x.status));
+                       }
+               } else {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe062,
+                           "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+                           vha->vp_idx);
+               }
+               break;
+
+       case ABTS_RECV_24XX:
+               ql_dbg(ql_dbg_tgt, vha, 0xe037,
+                   "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+               qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+               break;
+
+       case ABTS_RESP_24XX:
+               if (tgt->abts_resp_expected > 0) {
+                       struct abts_resp_from_24xx_fw *entry =
+                               (struct abts_resp_from_24xx_fw *)pkt;
+                       ql_dbg(ql_dbg_tgt, vha, 0xe038,
+                           "ABTS_RESP_24XX: compl_status %x\n",
+                           entry->compl_status);
+                       tgt->abts_resp_expected--;
+                       if (le16_to_cpu(entry->compl_status) !=
+                           ABTS_RESP_COMPL_SUCCESS) {
+                               if ((entry->error_subcode1 == 0x1E) &&
+                                   (entry->error_subcode2 == 0)) {
+                                       /*
+                                        * We've got a race here: the aborted
+                                        * exchange was not terminated, i.e.
+                                        * the response for the aborted command
+                                        * was sent between the time the abort
+                                        * request was received and the time it
+                                        * was processed.
+                                        * Unfortunately, the firmware has a
+                                        * silly requirement that all aborted
+                                        * exchanges must be explicitly
+                                        * terminated, otherwise it refuses to
+                                        * send responses for the abort
+                                        * requests. So, we have to
+                                        * (re)terminate the exchange and retry
+                                        * the abort response.
+                                        */
+                                       qlt_24xx_retry_term_exchange(vha,
+                                           entry);
+                               } else
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe063,
+                                           "qla_target(%d): ABTS_RESP_24XX "
+                                           "failed %x (subcode %x:%x)",
+                                           vha->vp_idx, entry->compl_status,
+                                           entry->error_subcode1,
+                                           entry->error_subcode2);
+                       }
+               } else {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe064,
+                           "qla_target(%d): Unexpected ABTS_RESP_24XX "
+                           "received\n", vha->vp_idx);
+               }
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe065,
+                   "qla_target(%d): Received unknown response pkt "
+                   "type %x\n", vha->vp_idx, pkt->entry_type);
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+       uint16_t *mailbox)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       int login_code;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe039,
+           "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+           vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+           ha->operating_mode, ha->current_topology);
+
+       if (!ha->tgt.tgt_ops)
+               return;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+                   "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+               return;
+       }
+
+       if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+           IS_QLA2100(ha))
+               return;
+       /*
+        * In tgt_stop mode we should still allow all requests to pass.
+        * Otherwise, some commands can get stuck.
+        */
+
+       tgt->irq_cmd_count++;
+
+       switch (code) {
+       case MBA_RESET:                 /* Reset */
+       case MBA_SYSTEM_ERR:            /* System Error */
+       case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
+       case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+                   "qla_target(%d): System error async event %#x "
+                   "occurred", vha->vp_idx, code);
+               break;
+       case MBA_WAKEUP_THRES:          /* Request Queue Wake-up. */
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               break;
+
+       case MBA_LOOP_UP:
+       {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+                   "qla_target(%d): Async LOOP_UP occurred "
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+               if (tgt->link_reinit_iocb_pending) {
+                       qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+                       tgt->link_reinit_iocb_pending = 0;
+               }
+               break;
+       }
+
+       case MBA_LIP_OCCURRED:
+       case MBA_LOOP_DOWN:
+       case MBA_LIP_RESET:
+       case MBA_RSCN_UPDATE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+                   "qla_target(%d): Async event %#x occurred "
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+               break;
+
+       case MBA_PORT_UPDATE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+                   "qla_target(%d): Port update async event %#x "
+                   "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
+                   "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+               login_code = le16_to_cpu(mailbox[2]);
+               if (login_code == 0x4)
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+                           "Async MB 2: Got PLOGI Complete\n");
+               else if (login_code == 0x7)
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+                           "Async MB 2: Port Logged Out\n");
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+                   "qla_target(%d): Async event %#x occurred: "
+                   "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+       uint16_t loop_id)
+{
+       fc_port_t *fcport;
+       int rc;
+
+       fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+       if (!fcport) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+                   "qla_target(%d): Allocation of tmp FC port failed",
+                   vha->vp_idx);
+               return NULL;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+
+       fcport->loop_id = loop_id;
+
+       rc = qla2x00_get_port_database(vha, fcport, 0);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+                   "qla_target(%d): Failed to retrieve fcport "
+                   "information -- get_port_database() returned %x "
+                   "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+               kfree(fcport);
+               return NULL;
+       }
+
+       return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+       uint8_t *s_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       fc_port_t *fcport = NULL;
+       int rc, global_resets;
+       uint16_t loop_id = 0;
+
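+       /*
+        * Snapshot the global reset counter; if a global target reset races
+        * with the loop ID / port database lookup below, start over so the
+        * session is not built from stale data.
+        */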
+retry:
+       global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+       rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+       if (rc != 0) {
+               if ((s_id[0] == 0xFF) &&
+                   (s_id[1] == 0xFC)) {
+                       /*
+                        * This is the Domain Controller, so it should be
+                        * OK to drop SCSI commands from it.
+                        */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+                           "Unable to find initiator with S_ID %x:%x:%x",
+                           s_id[0], s_id[1], s_id[2]);
+               } else
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+                           "qla_target(%d): Unable to find "
+                           "initiator with S_ID %x:%x:%x",
+                           vha->vp_idx, s_id[0], s_id[1],
+                           s_id[2]);
+               return NULL;
+       }
+
+       fcport = qlt_get_port_database(vha, loop_id);
+       if (!fcport)
+               return NULL;
+
+       if (global_resets !=
+           atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+                   "qla_target(%d): global reset during session discovery "
+                   "(counter was %d, new %d), retrying", vha->vp_idx,
+                   global_resets,
+                   atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+               goto retry;
+       }
+
+       sess = qlt_create_sess(vha, fcport, true);
+
+       kfree(fcport);
+       return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+       struct qla_tgt_sess_work_param *prm)
+{
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       unsigned long flags;
+       uint8_t s_id[3];
+       int rc;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+       s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+       s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has got an extra creation ref */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (!sess)
+                       goto out_term;
+       } else {
+               kref_get(&sess->se_sess->sess_kref);
+       }
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+       if (rc != 0)
+               goto out_term;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
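+/*
+ * Process a deferred task management ATIO from the sess_works_list: resolve
+ * the session for the S_ID, unpack the LUN and TM flags from the FCP_CMND
+ * and call qlt_issue_task_mgmt(); the exchange is terminated on any failure.
+ */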
+static void qlt_tmr_work(struct qla_tgt *tgt,
+       struct qla_tgt_sess_work_param *prm)
+{
+       struct atio_from_isp *a = &prm->tm_iocb2;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       unsigned long flags;
+       uint8_t *s_id = NULL; /* to hide compiler warnings */
+       int rc;
+       uint32_t lun, unpacked_lun;
+       int lun_size, fn;
+       void *iocb;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has got an extra creation ref */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (!sess)
+                       goto out_term;
+       } else {
+               kref_get(&sess->se_sess->sess_kref);
+       }
+
+       iocb = a;
+       lun = a->u.isp24.fcp_cmnd.lun;
+       lun_size = sizeof(lun);
+       fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+       if (rc != 0)
+               goto out_term;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
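+/*
+ * Session work handler: drains tgt->sess_works_list under sess_work_lock and
+ * dispatches each entry to the abort or task management helper above.
+ */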
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       while (!list_empty(&tgt->sess_works_list)) {
+               struct qla_tgt_sess_work_param *prm = list_entry(
+                   tgt->sess_works_list.next, typeof(*prm),
+                   sess_works_list_entry);
+
+               /*
+                * This work can be scheduled on several CPUs at a time, so we
+                * must delete the entry to avoid double processing
+                */
+               list_del(&prm->sess_works_list_entry);
+
+               spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+               switch (prm->type) {
+               case QLA_TGT_SESS_WORK_ABORT:
+                       qlt_abort_work(tgt, prm);
+                       break;
+               case QLA_TGT_SESS_WORK_TM:
+                       qlt_tmr_work(tgt, prm);
+                       break;
+               default:
+                       BUG_ON(1);
+                       break;
+               }
+
+               spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+               kfree(prm);
+       }
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+       struct qla_tgt *tgt;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+           "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+       BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+       tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+                   "Unable to allocate struct qla_tgt\n");
+               return -ENOMEM;
+       }
+
+       if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+               base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+       tgt->ha = ha;
+       tgt->vha = base_vha;
+       init_waitqueue_head(&tgt->waitQ);
+       INIT_LIST_HEAD(&tgt->sess_list);
+       INIT_LIST_HEAD(&tgt->del_sess_list);
+       INIT_DELAYED_WORK(&tgt->sess_del_work,
+               (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+       spin_lock_init(&tgt->sess_work_lock);
+       INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+       INIT_LIST_HEAD(&tgt->sess_works_list);
+       spin_lock_init(&tgt->srr_lock);
+       INIT_LIST_HEAD(&tgt->srr_ctio_list);
+       INIT_LIST_HEAD(&tgt->srr_imm_list);
+       INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+       atomic_set(&tgt->tgt_global_resets_count, 0);
+
+       ha->tgt.qla_tgt = tgt;
+
+       ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+               "qla_target(%d): using 64 Bit PCI addressing",
+               base_vha->vp_idx);
+       tgt->tgt_enable_64bit_addr = 1;
+       /* 3 is reserved */
+       tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+       tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+       tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+       mutex_lock(&qla_tgt_mutex);
+       list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+       mutex_unlock(&qla_tgt_mutex);
+
+       return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+       if (!ha->tgt.qla_tgt)
+               return 0;
+
+       mutex_lock(&qla_tgt_mutex);
+       list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+       mutex_unlock(&qla_tgt_mutex);
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+           vha->host_no, ha);
+       qlt_release(ha->tgt.qla_tgt);
+
+       return 0;
+}
+
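+/*
+ * Debug helper: dumps the HW node/port names and the WWPN passed in from
+ * configfs; also serializes @wwpn into @b for the port name comparison done
+ * by qlt_lport_register().
+ */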
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+       unsigned char *b)
+{
+       int i;
+
+       pr_debug("qla2xxx HW vha->node_name: ");
+       for (i = 0; i < WWN_SIZE; i++)
+               pr_debug("%02x ", vha->node_name[i]);
+       pr_debug("\n");
+       pr_debug("qla2xxx HW vha->port_name: ");
+       for (i = 0; i < WWN_SIZE; i++)
+               pr_debug("%02x ", vha->port_name[i]);
+       pr_debug("\n");
+
+       pr_debug("qla2xxx passed configfs WWPN: ");
+       put_unaligned_be64(wwpn, b);
+       for (i = 0; i < WWN_SIZE; i++)
+               pr_debug("%02x ", b[i]);
+       pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback:  lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+       int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+       struct qla_tgt *tgt;
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha;
+       struct Scsi_Host *host;
+       unsigned long flags;
+       int rc;
+       u8 b[WWN_SIZE];
+
+       mutex_lock(&qla_tgt_mutex);
+       list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+               vha = tgt->vha;
+               ha = vha->hw;
+
+               host = vha->host;
+               if (!host)
+                       continue;
+
+               if (ha->tgt.tgt_ops != NULL)
+                       continue;
+
+               if (!(host->hostt->supported_mode & MODE_TARGET))
+                       continue;
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (host->active_mode & MODE_TARGET) {
+                       pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+                           host->host_no);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       continue;
+               }
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               if (!scsi_host_get(host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe068,
+                           "Unable to scsi_host_get() for"
+                           " qla2xxx scsi_host\n");
+                       continue;
+               }
+               qlt_lport_dump(vha, wwpn, b);
+
+               if (memcmp(vha->port_name, b, WWN_SIZE)) {
+                       scsi_host_put(host);
+                       continue;
+               }
+               /*
+                * Setup passed parameters ahead of invoking callback
+                */
+               ha->tgt.tgt_ops = qla_tgt_ops;
+               ha->tgt.target_lport_ptr = target_lport_ptr;
+               rc = (*callback)(vha);
+               if (rc != 0) {
+                       ha->tgt.tgt_ops = NULL;
+                       ha->tgt.target_lport_ptr = NULL;
+               }
+               mutex_unlock(&qla_tgt_mutex);
+               return rc;
+       }
+       mutex_unlock(&qla_tgt_mutex);
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - Deregister lport
+ *
+ * @vha:  Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct Scsi_Host *sh = vha->host;
+       /*
+        * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
+        */
+       ha->tgt.target_lport_ptr = NULL;
+       ha->tgt.tgt_ops = NULL;
+       /*
+        * Release the Scsi_Host reference for the underlying qla2xxx host
+        */
+       scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       switch (ql2x_ini_mode) {
+       case QLA2XXX_INI_MODE_DISABLED:
+       case QLA2XXX_INI_MODE_EXCLUSIVE:
+               vha->host->active_mode = MODE_TARGET;
+               break;
+       case QLA2XXX_INI_MODE_ENABLED:
+               vha->host->active_mode |= MODE_TARGET;
+               break;
+       default:
+               break;
+       }
+
+       if (ha->tgt.ini_mode_force_reverse)
+               qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       switch (ql2x_ini_mode) {
+       case QLA2XXX_INI_MODE_DISABLED:
+               vha->host->active_mode = MODE_UNKNOWN;
+               break;
+       case QLA2XXX_INI_MODE_EXCLUSIVE:
+               vha->host->active_mode = MODE_INITIATOR;
+               break;
+       case QLA2XXX_INI_MODE_ENABLED:
+               vha->host->active_mode &= ~MODE_TARGET;
+               break;
+       default:
+               break;
+       }
+
+       if (ha->tgt.ini_mode_force_reverse)
+               qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * Perform a host reset and bring the HBA back up with target mode enabled.
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       unsigned long flags;
+
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe069,
+                   "Unable to locate qla_tgt pointer from"
+                   " struct qla_hw_data\n");
+               dump_stack();
+               return;
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tgt->tgt_stopped = 0;
+       qlt_set_mode(vha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       qla2xxx_wake_dpc(vha);
+       qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       unsigned long flags;
+
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+                   "Unable to locate qla_tgt pointer from"
+                   " struct qla_hw_data\n");
+               dump_stack();
+               return;
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_clear_mode(vha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       qla2xxx_wake_dpc(vha);
+       qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target-mode-specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+       if (!qla_tgt_mode_enabled(vha))
+               return;
+
+       mutex_init(&ha->tgt.tgt_mutex);
+       mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+       qlt_clear_mode(vha);
+
+       /*
+        * NOTE: Currently the value is kept the same for <24xx and
+        * >=24xx ISPs. If it is necessary to change it,
+        * the check should be added for specific ISPs,
+        * assigning the value appropriately.
+        */
+       ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+       /*
+        * FC-4 Feature bit 0 indicates target functionality to the name server.
+        */
+       if (qla_tgt_mode_enabled(vha)) {
+               if (qla_ini_mode_enabled(vha))
+                       ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+               else
+                       ct_req->req.rff_id.fc4_feature = BIT_0;
+       } else if (qla_ini_mode_enabled(vha)) {
+               ct_req->req.rff_id.fc4_feature = BIT_1;
+       }
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @vha: SCSI qla host context
+ *
+ * The beginning of the ATIO ring already has the initialization control
+ * block built by the NVRAM config routine.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t cnt;
+       struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+       if (!qla_tgt_mode_enabled(vha))
+               return;
+
+       for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+               pkt->u.raw.signature = ATIO_PROCESSED;
+               pkt++;
+       }
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @vha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       struct atio_from_isp *pkt;
+       int cnt, i;
+
+       if (!vha->flags.online)
+               return;
+
+       while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+               pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+               cnt = pkt->u.raw.entry_count;
+
+               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+               for (i = 0; i < cnt; i++) {
+                       ha->tgt.atio_ring_index++;
+                       if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+                               ha->tgt.atio_ring_index = 0;
+                               ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+                       } else
+                               ha->tgt.atio_ring_ptr++;
+
+                       pkt->u.raw.signature = ATIO_PROCESSED;
+                       pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+               }
+               wmb();
+       }
+
+       /* Adjust ring index */
+       WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
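+/*
+ * Reset the ATIO queue in/out pointers while the ISP rings are being
+ * configured; the multi-queue (ha->mqenable) case is still a FIXME below.
+ */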
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+       if (ha->mqenable) {
+#if 0
+               WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+               WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+               RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+       } else {
+               /* Setup ATIO registers for target mode */
+               WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+               WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+               RD_REG_DWORD(&reg->isp24.atio_q_out);
+       }
+}
+
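+/*
+ * Adjust the 24xx NVRAM firmware options for target mode: the original values
+ * are saved once and restored when target mode is not enabled; otherwise the
+ * target mode bits are set and, if requested, initiator mode is disabled.
+ */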
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (qla_tgt_mode_enabled(vha)) {
+               if (!ha->tgt.saved_set) {
+                       /* We save only once */
+                       ha->tgt.saved_exchange_count = nv->exchange_count;
+                       ha->tgt.saved_firmware_options_1 =
+                           nv->firmware_options_1;
+                       ha->tgt.saved_firmware_options_2 =
+                           nv->firmware_options_2;
+                       ha->tgt.saved_firmware_options_3 =
+                           nv->firmware_options_3;
+                       ha->tgt.saved_set = 1;
+               }
+
+               nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+               /* Enable target mode */
+               nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+               /* Disable ini mode, if requested */
+               if (!qla_ini_mode_enabled(vha))
+                       nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+               /* Disable Full Login after LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Enable initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+               /* Enable FC tapes support */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+               /* Disable Full Login after LIP */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+               /* Enable target PRLI control */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+       } else {
+               if (ha->tgt.saved_set) {
+                       nv->exchange_count = ha->tgt.saved_exchange_count;
+                       nv->firmware_options_1 =
+                           ha->tgt.saved_firmware_options_1;
+                       nv->firmware_options_2 =
+                           ha->tgt.saved_firmware_options_2;
+                       nv->firmware_options_3 =
+                           ha->tgt.saved_firmware_options_3;
+               }
+               return;
+       }
+
+       /* out-of-order frames reassembly */
+       nv->firmware_options_3 |= BIT_6|BIT_9;
+
+       if (ha->tgt.enable_class_2) {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) =
+                               FC_COS_CLASS2 | FC_COS_CLASS3;
+
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+       } else {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+               nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+       }
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+       struct init_cb_24xx *icb)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->tgt.node_name_set) {
+               memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+               icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+       }
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+       struct sts_entry_24xx *pkt)
+{
+       switch (pkt->entry_type) {
+       case ABTS_RECV_24XX:
+       case ABTS_RESP_24XX:
+       case CTIO_TYPE7:
+       case NOTIFY_ACK_TYPE:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
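+/*
+ * Adjust the VP config IOCB options according to the configured
+ * target/initiator modes.
+ */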
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+       struct vp_config_entry_24xx *vpmod)
+{
+       if (qla_tgt_mode_enabled(vha))
+               vpmod->options_idx1 &= ~BIT_5;
+       /* Disable ini mode, if requested */
+       if (!qla_ini_mode_enabled(vha))
+               vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       mutex_init(&ha->tgt.tgt_mutex);
+       mutex_init(&ha->tgt.tgt_host_action_mutex);
+       qlt_clear_mode(base_vha);
+}
+
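+/*
+ * Allocate the per-HA target mode resources: the vp_idx/al_pa -> vha map and
+ * the DMA-coherent ATIO ring. Both are released by qlt_mem_free().
+ */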
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+           MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+       if (!ha->tgt.tgt_vp_map)
+               return -ENOMEM;
+
+       ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+           (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+           &ha->tgt.atio_dma, GFP_KERNEL);
+       if (!ha->tgt.atio_ring) {
+               kfree(ha->tgt.tgt_vp_map);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (ha->tgt.atio_ring) {
+               dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+                   sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+                   ha->tgt.atio_dma);
+       }
+       kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       switch (cmd) {
+       case SET_VP_IDX:
+               vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+               break;
+       case SET_AL_PA:
+               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+               break;
+       case RESET_VP_IDX:
+               vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+               break;
+       case RESET_AL_PA:
+               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+               break;
+       }
+}
+
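+/*
+ * Translate the qlini_mode module parameter string into one of the
+ * QLA2XXX_INI_MODE_* values; returns false for an unrecognized string.
+ */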
+static int __init qlt_parse_ini_mode(void)
+{
+       if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+       else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+       else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+       else
+               return false;
+
+       return true;
+}
+
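+/*
+ * Module-level init for the target code: parses qlini_mode and, when target
+ * mode is compiled in, creates the cmd/mgmt-cmd caches, the mgmt-cmd mempool
+ * and the qla_tgt workqueue.
+ */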
+int __init qlt_init(void)
+{
+       int ret;
+
+       if (!qlt_parse_ini_mode()) {
+               ql_log(ql_log_fatal, NULL, 0xe06b,
+                   "qlt_parse_ini_mode() failed\n");
+               return -EINVAL;
+       }
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+           sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+           NULL);
+       if (!qla_tgt_cmd_cachep) {
+               ql_log(ql_log_fatal, NULL, 0xe06c,
+                   "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+               return -ENOMEM;
+       }
+
+       qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+           sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+           qla_tgt_mgmt_cmd), 0, NULL);
+       if (!qla_tgt_mgmt_cmd_cachep) {
+               ql_log(ql_log_fatal, NULL, 0xe06d,
+                   "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+           mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+       if (!qla_tgt_mgmt_cmd_mempool) {
+               ql_log(ql_log_fatal, NULL, 0xe06e,
+                   "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+               ret = -ENOMEM;
+               goto out_mgmt_cmd_cachep;
+       }
+
+       qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+       if (!qla_tgt_wq) {
+               ql_log(ql_log_fatal, NULL, 0xe06f,
+                   "alloc_workqueue for qla_tgt_wq failed\n");
+               ret = -ENOMEM;
+               goto out_cmd_mempool;
+       }
+       /*
+        * Return 1 to signal that initiator-mode is being disabled
+        */
+       return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+       mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+       kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+       kmem_cache_destroy(qla_tgt_cmd_cachep);
+       return ret;
+}
+
+void qlt_exit(void)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       destroy_workqueue(qla_tgt_wq);
+       mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+       kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+       kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644 (file)
index 0000000..9f9ef16
--- /dev/null
@@ -0,0 +1,1004 @@
+/*
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  Additional file for the target driver support.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+/*
+ * This is the global definitions file that is useful to include from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC   269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC   57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED  "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED   "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE     0
+#define QLA2XXX_INI_MODE_DISABLED      1
+#define QLA2XXX_INI_MODE_ENABLED       2
+
+#define QLA2XXX_COMMAND_COUNT_INIT     250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO status IOCBs) are for CTIOs
+ * vs. regular (non-target) info. qla2x00_process_response_queue() checks this
+ * to see whether a handle coming back in a multi-complete should go to the
+ * target driver or be handled by qla2xxx itself.
+ */
+#define CTIO_COMPLETION_HANDLE_MARK    BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK  BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0        0
+#define OF_SS_MODE_1        1
+#define OF_SS_MODE_2        2
+#define OF_SS_MODE_3        3
+
+#define OF_EXPL_CONF        BIT_5       /* Explicit Confirmation Requested */
+#define OF_DATA_IN          BIT_6       /* Data in to initiator */
+                                       /*  (data from target to initiator) */
+#define OF_DATA_OUT         BIT_7       /* Data out from initiator */
+                                       /*  (data from initiator to target) */
+#define OF_NO_DATA          (BIT_7 | BIT_6)
+#define OF_INC_RC           BIT_8       /* Increment command resource count */
+#define OF_FAST_POST        BIT_9       /* Enable mailbox fast posting. */
+#define OF_CONF_REQ         BIT_13      /* Confirmation Requested */
+#define OF_TERM_EXCH        BIT_14      /* Terminate exchange */
+#define OF_SSTS             BIT_15      /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32     3
+#define QLA_TGT_DATASEGS_PER_CONT32    7
+#define QLA_TGT_MAX_SG32(ql) \
+       (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+               QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64     2
+#define QLA_TGT_DATASEGS_PER_CONT64    5
+#define QLA_TGT_MAX_SG64(ql) \
+       (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+               QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX  1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+       (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+               QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))                        \
+                        ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+                        : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D         /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ *             This is sent by the ISP to the Target driver.
+ *             This IOCB would have report of events sent by the
+ *             This IOCB carries reports of events sent by the
+ *             initiator that need to be handled by the target
+ *             driver immediately.
+struct imm_ntfy_from_isp {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       union {
+               struct {
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint16_t lun;
+                       uint8_t  target_id;
+                       uint8_t  reserved_1;
+                       uint16_t status_modifier;
+                       uint16_t status;
+                       uint16_t task_flags;
+                       uint16_t seq_id;
+                       uint16_t srr_rx_id;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT        0x5
+#define SRR_IU_STATUS  0x7
+                       uint16_t srr_ox_id;
+                       uint8_t reserved_2[28];
+               } isp2x;
+               struct {
+                       uint32_t reserved;
+                       uint16_t nport_handle;
+                       uint16_t reserved_2;
+                       uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
+                       uint16_t srr_rx_id;
+                       uint16_t status;
+                       uint8_t  status_subcode;
+                       uint8_t  reserved_3;
+                       uint32_t exchange_address;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_ox_id;
+                       uint8_t  reserved_4[19];
+                       uint8_t  vp_index;
+                       uint32_t reserved_5;
+                       uint8_t  port_id[3];
+                       uint8_t  reserved_6;
+               } isp24;
+       } u;
+       uint16_t reserved_7;
+       uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E     /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ *             This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       union {
+               struct {
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint8_t  target_id;
+                       uint8_t  reserved_1;
+                       uint16_t flags;
+                       uint16_t resp_code;
+                       uint16_t status;
+                       uint16_t task_flags;
+                       uint16_t seq_id;
+                       uint16_t srr_rx_id;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_flags;
+                       uint16_t srr_reject_code;
+                       uint8_t  srr_reject_vendor_uniq;
+                       uint8_t  srr_reject_code_expl;
+                       uint8_t  reserved_2[24];
+               } isp2x;
+               struct {
+                       uint32_t handle;
+                       uint16_t nport_handle;
+                       uint16_t reserved_1;
+                       uint16_t flags;
+                       uint16_t srr_rx_id;
+                       uint16_t status;
+                       uint8_t  status_subcode;
+                       uint8_t  reserved_3;
+                       uint32_t exchange_address;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_flags;
+                       uint8_t  reserved_4[19];
+                       uint8_t  vp_index;
+                       uint8_t  srr_reject_vendor_uniq;
+                       uint8_t  srr_reject_code_expl;
+                       uint8_t  srr_reject_code;
+                       uint8_t  reserved_5[5];
+               } isp24;
+       } u;
+       uint8_t  reserved[2];
+       uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT    0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT    1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL               0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS      0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ *             This structure is sent to the ISP 2xxx from target driver.
+ *             This structure is sent to the ISP 2xxx from the target driver.
+struct ctio_to_2xxx {
+       uint8_t  entry_type;            /* Entry type. */
+       uint8_t  entry_count;           /* Entry count. */
+       uint8_t  sys_define;            /* System defined. */
+       uint8_t  entry_status;          /* Entry Status. */
+       uint32_t handle;                /* System defined handle */
+       target_id_t target;
+       uint16_t rx_id;
+       uint16_t flags;
+       uint16_t status;
+       uint16_t timeout;               /* 0 = 30 seconds, 0xFFFF = disable */
+       uint16_t dseg_count;            /* Data segment count. */
+       uint32_t relative_offset;
+       uint32_t residual;
+       uint16_t reserved_1[3];
+       uint16_t scsi_status;
+       uint32_t transfer_length;
+       uint32_t dseg_0_address;        /* Data segment 0 address. */
+       uint32_t dseg_0_length;         /* Data segment 0 length. */
+       uint32_t dseg_1_address;        /* Data segment 1 address. */
+       uint32_t dseg_1_length;         /* Data segment 1 length. */
+       uint32_t dseg_2_address;        /* Data segment 2 address. */
+       uint32_t dseg_2_length;         /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID       0x07
+#define ATIO_CANT_PROV_CAP      0x16
+#define ATIO_CDB_VALID          0x3D
+
+#define ATIO_EXEC_READ          BIT_1
+#define ATIO_EXEC_WRITE         BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS                   0x01
+#define CTIO_ABORTED                   0x02
+#define CTIO_INVALID_RX_ID             0x08
+#define CTIO_TIMEOUT                   0x0B
+#define CTIO_LIP_RESET                 0x0E
+#define CTIO_TARGET_RESET              0x17
+#define CTIO_PORT_UNAVAILABLE          0x28
+#define CTIO_PORT_LOGGED_OUT           0x29
+#define CTIO_PORT_CONF_CHANGED         0x2A
+#define CTIO_SRR_RECEIVED              0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE  0x17            /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+       uint8_t  r_ctl;
+       uint8_t  d_id[3];
+       uint8_t  cs_ctl;
+       uint8_t  s_id[3];
+       uint8_t  type;
+       uint8_t  f_ctl[3];
+       uint8_t  seq_id;
+       uint8_t  df_ctl;
+       uint16_t seq_cnt;
+       uint16_t ox_id;
+       uint16_t rx_id;
+       uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+       uint8_t  d_id[3];
+       uint8_t  r_ctl;
+       uint8_t  s_id[3];
+       uint8_t  cs_ctl;
+       uint8_t  f_ctl[3];
+       uint8_t  type;
+       uint16_t seq_cnt;
+       uint8_t  df_ctl;
+       uint8_t  seq_id;
+       uint16_t rx_id;
+       uint16_t ox_id;
+       uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP        BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP        BIT_22
+#define F_CTL_LAST_SEQ         BIT_20
+#define F_CTL_END_SEQ          BIT_19
+#define F_CTL_SEQ_INITIATIVE   BIT_16
+
+#define R_CTL_BASIC_LINK_SERV  0x80
+#define R_CTL_B_ACC            0x4
+#define R_CTL_B_RJT            0x5
+
+struct atio7_fcp_cmnd {
+       uint64_t lun;
+       uint8_t  cmnd_ref;
+       uint8_t  task_attr:3;
+       uint8_t  reserved:5;
+       uint8_t  task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA           6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET                5
+#define FCP_CMND_TASK_MGMT_LU_RESET            4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET      2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET      1
+       uint8_t  wrdata:1;
+       uint8_t  rddata:1;
+       uint8_t  add_cdb_len:6;
+       uint8_t  cdb[16];
+       /*
+        * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
+        * Its size is 4 only to make sizeof(struct atio7_fcp_cmnd) match what
+        * the BUILD_BUG_ON() in qlt_init() expects.
+        */
+       uint8_t  add_cdb[4];
+       /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ *             This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+       union {
+               struct {
+                       uint16_t entry_hdr;
+                       uint8_t  sys_define;   /* System defined. */
+                       uint8_t  entry_status; /* Entry Status.   */
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint16_t rx_id;
+                       uint16_t flags;
+                       uint16_t status;
+                       uint8_t  command_ref;
+                       uint8_t  task_codes;
+                       uint8_t  task_flags;
+                       uint8_t  execution_codes;
+                       uint8_t  cdb[MAX_CMDSZ];
+                       uint32_t data_length;
+                       uint16_t lun;
+                       uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
+                       uint16_t reserved_32[6];
+                       uint16_t ox_id;
+               } isp2x;
+               struct {
+                       uint16_t entry_hdr;
+                       uint8_t  fcp_cmnd_len_low;
+                       uint8_t  fcp_cmnd_len_high:4;
+                       uint8_t  attr:4;
+                       uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN  0xFFFFFFFF
+                       struct fcp_hdr fcp_hdr;
+                       struct atio7_fcp_cmnd fcp_cmnd;
+               } isp24;
+               struct {
+                       uint8_t  entry_type;    /* Entry type. */
+                       uint8_t  entry_count;   /* Entry count. */
+                       uint8_t  data[58];
+                       uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
+               } raw;
+       } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ *             This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;                    /* System defined handle */
+       uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED     0xFFFF
+       uint16_t timeout;
+       uint16_t dseg_count;                /* Data segment count. */
+       uint8_t  vp_index;
+       uint8_t  add_flags;
+       uint8_t  initiator_id[3];
+       uint8_t  reserved;
+       uint32_t exchange_addr;
+       union {
+               struct {
+                       uint16_t reserved1;
+                       uint16_t flags;
+                       uint32_t residual;
+                       uint16_t ox_id;
+                       uint16_t scsi_status;
+                       uint32_t relative_offset;
+                       uint32_t reserved2;
+                       uint32_t transfer_length;
+                       uint32_t reserved3;
+                       /* Data segment 0 address. */
+                       uint32_t dseg_0_address[2];
+                       /* Data segment 0 length. */
+                       uint32_t dseg_0_length;
+               } status0;
+               struct {
+                       uint16_t sense_length;
+                       uint16_t flags;
+                       uint32_t residual;
+                       uint16_t ox_id;
+                       uint16_t scsi_status;
+                       uint16_t response_len;
+                       uint16_t reserved;
+                       uint8_t sense_data[24];
+               } status1;
+       } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;                    /* System defined handle */
+       uint16_t status;
+       uint16_t timeout;
+       uint16_t dseg_count;                /* Data segment count. */
+       uint8_t  vp_index;
+       uint8_t  reserved1[5];
+       uint32_t exchange_address;
+       uint16_t reserved2;
+       uint16_t flags;
+       uint32_t residual;
+       uint16_t ox_id;
+       uint16_t reserved3;
+       uint32_t relative_offset;
+       uint8_t  reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS                BIT_15
+#define CTIO7_FLAGS_TERMINATE          BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ                BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO      BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0      0
+#define CTIO7_FLAGS_STATUS_MODE_1      BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM   BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF     BIT_4
+#define CTIO7_FLAGS_DSD_PTR            BIT_2
+#define CTIO7_FLAGS_DATA_IN            BIT_1
+#define CTIO7_FLAGS_DATA_OUT           BIT_0
+
+#define ELS_PLOGI                      0x3
+#define ELS_FLOGI                      0x4
+#define ELS_LOGO                       0x5
+#define ELS_PRLI                       0x20
+#define ELS_PRLO                       0x21
+#define ELS_TPRLO                      0x24
+#define ELS_PDISC                      0x50
+#define ELS_ADISC                      0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX         0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX         0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ *             The ABTS BLS received from the wire is sent to the
+ *             target driver by the ISP 24xx.
+ *             The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint8_t  reserved_1[6];
+       uint16_t nport_handle;
+       uint8_t  reserved_2[2];
+       uint8_t  vp_index;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       uint8_t  reserved_4[16];
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ           BIT_0
+
+struct ba_acc_le {
+       uint16_t reserved;
+       uint8_t  seq_id_last;
+       uint8_t  seq_id_valid;
+#define SEQ_ID_VALID   0x80
+#define SEQ_ID_INVALID 0x00
+       uint16_t rx_id;
+       uint16_t ox_id;
+       uint16_t high_seq_cnt;
+       uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+       uint8_t vendor_uniq;
+       uint8_t reason_expl;
+       uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND     0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM   0x9
+       uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ *             The ABTS response to the ABTS received is sent by the
+ *             target driver to the ISP 24xx.
+ *             The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;
+       uint16_t reserved_1;
+       uint16_t nport_handle;
+       uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG      BIT_0
+       uint8_t  vp_index;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       union {
+               struct ba_acc_le ba_acct;
+               struct ba_rjt_le ba_rjt;
+       } __packed payload;
+       uint32_t reserved_4;
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ *             The ABTS response with completion status to the ABTS response
+ *             (sent by the target driver to the ISP 24xx) is sent by the
+ *             ISP24xx firmware to the target driver.
+ *             The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;
+       uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS                0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR  0x31
+       uint16_t nport_handle;
+       uint16_t reserved_1;
+       uint8_t  reserved_2;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       uint8_t reserved_4[8];
+       uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM    0x1E
+       uint32_t error_subcode2;
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+       int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+                       unsigned char *, uint32_t, int, int, int);
+       int (*handle_data)(struct qla_tgt_cmd *);
+       int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+                       uint32_t);
+       void (*free_cmd)(struct qla_tgt_cmd *);
+       void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+       void (*free_session)(struct qla_tgt_sess *);
+
+       int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+                                       void *, uint8_t *, uint16_t);
+       struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+                                               const uint16_t);
+       struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+                                               const uint8_t *);
+       void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+       void (*put_sess)(struct qla_tgt_sess *);
+       void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT                        10      /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME    60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET          0x000E
+#define IMM_NTFY_LIP_LINK_REINIT    0x000F
+#define IMM_NTFY_IOCB_OVERFLOW      0x0016
+#define IMM_NTFY_ABORT_TASK         0x0020
+#define IMM_NTFY_PORT_LOGOUT        0x0029
+#define IMM_NTFY_PORT_CONFIG        0x002A
+#define IMM_NTFY_GLBL_TPRLO         0x002D
+#define IMM_NTFY_GLBL_LOGO          0x002E
+#define IMM_NTFY_RESOURCE           0x0034
+#define IMM_NTFY_MSG_RX             0x0036
+#define IMM_NTFY_SRR                0x0045
+#define IMM_NTFY_ELS                0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT    8
+
+#define QLA_TGT_CLEAR_ACA               0x40
+#define QLA_TGT_TARGET_RESET            0x20
+#define QLA_TGT_LUN_RESET               0x10
+#define QLA_TGT_CLEAR_TS                0x04
+#define QLA_TGT_ABORT_TS                0x02
+#define QLA_TGT_ABORT_ALL_SESS          0xFFFF
+#define QLA_TGT_ABORT_ALL               0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
+#define QLA_TGT_NEXUS_LOSS              0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT        BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET  BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW              0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA                1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN          2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED                3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED          4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE    0
+#define QLA_TGT_SKIP_HANDLE    (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE           0
+#define ATIO_HEAD_OF_QUEUE          1
+#define ATIO_ORDERED_QUEUE          2
+#define ATIO_ACA_QUEUE              4
+#define ATIO_UNTAGGED               5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define        FC_TM_SUCCESS               0
+#define        FC_TM_BAD_FCP_DATA          1
+#define        FC_TM_BAD_CMD               2
+#define        FC_TM_FCP_DATA_MISMATCH     3
+#define        FC_TM_REJECT                4
+#define FC_TM_FAILED                5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that the cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to the target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED      0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense)  ((sense != NULL) && \
+                               (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+       uint8_t port_name[WWN_SIZE];
+       uint16_t loop_id;
+       uint16_t reserved;
+};
+
+struct qla_tgt {
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha;
+
+       /*
+        * To sync between IRQ handlers and qlt_target_release(). Needed
+        * because req_pkt() can drop/reacquire the HW lock inside. Protected
+        * by the HW lock.
+        */
+       int irq_cmd_count;
+
+       int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+       /* Target's flags, serialized by ha->hardware_lock */
+       unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+       unsigned int link_reinit_iocb_pending:1;
+
+       /*
+        * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+        * OR hardware_lock for reading.
+        */
+       int tgt_stop; /* the target mode driver is being stopped */
+       int tgt_stopped; /* the target mode driver has been stopped */
+
+       /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+       int sess_count;
+
+       /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+       struct list_head sess_list;
+
+       /* Protected by hardware_lock */
+       struct list_head del_sess_list;
+       struct delayed_work sess_del_work;
+
+       spinlock_t sess_work_lock;
+       struct list_head sess_works_list;
+       struct work_struct sess_work;
+
+       struct imm_ntfy_from_isp link_reinit_iocb;
+       wait_queue_head_t waitQ;
+       int notify_ack_expected;
+       int abts_resp_expected;
+       int modify_lun_expected;
+
+       int ctio_srr_id;
+       int imm_srr_id;
+       spinlock_t srr_lock;
+       struct list_head srr_ctio_list;
+       struct list_head srr_imm_list;
+       struct work_struct srr_work;
+
+       atomic_t tgt_global_resets_count;
+
+       struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to an IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+       uint16_t loop_id;
+       port_id_t s_id;
+
+       unsigned int conf_compl_supported:1;
+       unsigned int deleted:1;
+       unsigned int local:1;
+       unsigned int tearing_down:1;
+
+       struct se_session *se_sess;
+       struct scsi_qla_host *vha;
+       struct qla_tgt *tgt;
+
+       struct list_head sess_list_entry;
+       unsigned long expires;
+       struct list_head del_list_entry;
+
+       uint8_t port_name[WWN_SIZE];
+       struct work_struct free_work;
+};
+
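+/*
+ * Per-command context: embeds the target-core se_cmd and keeps the original
+ * ATIO together with the data buffer SG list for the command.
+ */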
+struct qla_tgt_cmd {
+       struct qla_tgt_sess *sess;
+       int state;
+       struct se_cmd se_cmd;
+       struct work_struct free_work;
+       struct work_struct work;
+       /* Sense buffer that will be mapped into outgoing status */
+       unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+       /* to save extra sess dereferences */
+       unsigned int conf_compl_supported:1;
+       unsigned int sg_mapped:1;
+       unsigned int free_sg:1;
+       unsigned int aborted:1; /* Needed in case of SRR */
+       unsigned int write_data_transferred:1;
+
+       struct scatterlist *sg; /* cmd data buffer SG vector */
+       int sg_cnt;             /* SG segments count */
+       int bufflen;            /* cmd buffer length */
+       int offset;
+       uint32_t tag;
+       uint32_t unpacked_lun;
+       enum dma_data_direction dma_data_direction;
+
+       uint16_t loop_id;       /* to save extra sess dereferences */
+       struct qla_tgt *tgt;    /* to save extra sess dereferences */
+       struct scsi_qla_host *vha;
+       struct list_head cmd_list;
+
+       struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+       struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT        1
+#define QLA_TGT_SESS_WORK_TM   2
+       int type;
+
+       union {
+               struct abts_recv_from_24xx abts;
+               struct imm_ntfy_from_isp tm_iocb;
+               struct atio_from_isp tm_iocb2;
+       };
+};
+
+struct qla_tgt_mgmt_cmd {
+       uint8_t tmr_func;
+       uint8_t fc_tm_rsp;
+       struct qla_tgt_sess *sess;
+       struct se_cmd se_cmd;
+       struct work_struct free_work;
+       unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK 1
+       union {
+               struct atio_from_isp atio;
+               struct imm_ntfy_from_isp imm_ntfy;
+               struct abts_recv_from_24xx abts;
+       } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+       struct qla_tgt_cmd *cmd;
+       struct qla_tgt *tgt;
+       void *pkt;
+       struct scatterlist *sg; /* cmd data buffer SG vector */
+       int seg_cnt;
+       int req_cnt;
+       uint16_t rq_result;
+       uint16_t scsi_status;
+       unsigned char *sense_buffer;
+       int sense_buffer_len;
+       int residual;
+       int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+       struct list_head srr_list_entry;
+       int srr_id;
+       struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+       struct list_head srr_list_entry;
+       int srr_id;
+       struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA              1
+#define QLA_TGT_XMIT_STATUS            2
+#define QLA_TGT_XMIT_ALL               (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+                       int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+       return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+       return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+       if (ha->host->active_mode & MODE_INITIATOR)
+               ha->host->active_mode &= ~MODE_INITIATOR;
+       else
+               ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+       struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+       device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+       struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+       struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+       struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+       struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644 (file)
index 0000000..6e64314
--- /dev/null
@@ -0,0 +1,1919 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
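+/*
+ * Module-level workqueues: tcm_qla2xxx_free_wq is used below to defer
+ * command and TMR release out of hardware_lock/IRQ context; the
+ * tcm_qla2xxx_cmd_wq counterpart is set up alongside it for use elsewhere
+ * in the driver.
+ */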
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
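+/*
+ * Trivial se_portal_group predicates for fabric callbacks that always
+ * return a fixed true/false answer regardless of TPG state.
+ */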
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+       return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
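+/*
+ * Example (illustrative WWN only): "21:00:00:24:ff:31:04:b2" parses to the
+ * u64 0x21000024ff3104b2; in strict mode upper-case hex digits or missing
+ * colon separators are rejected.
+ */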
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+       const char *cp;
+       char c;
+       u32 nibble;
+       u32 byte = 0;
+       u32 pos = 0;
+       u32 err;
+
+       *wwn = 0;
+       for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+               c = *cp;
+               if (c == '\n' && cp[1] == '\0')
+                       continue;
+               if (strict && pos++ == 2 && byte++ < 7) {
+                       pos = 0;
+                       if (c == ':')
+                               continue;
+                       err = 1;
+                       goto fail;
+               }
+               if (c == '\0') {
+                       err = 2;
+                       if (strict && byte != 8)
+                               goto fail;
+                       return cp - name;
+               }
+               err = 3;
+               if (isdigit(c))
+                       nibble = c - '0';
+               else if (isxdigit(c) && (islower(c) || !strict))
+                       nibble = tolower(c) - 'a' + 10;
+               else
+                       goto fail;
+               *wwn = (*wwn << 4) | nibble;
+       }
+       err = 4;
+fail:
+       pr_debug("err %u len %zu pos %u byte %u\n",
+                       err, cp - name, pos, byte);
+       return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+       u8 b[8];
+
+       put_unaligned_be64(wwn, b);
+       return snprintf(buf, len,
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+               b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+       return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+       unsigned int i, j;
+       u8 wwn[8];
+
+       memset(wwn, 0, sizeof(wwn));
+
+       /* Validate and store the new name */
+       for (i = 0, j = 0; i < 16; i++) {
+               int value;
+
+               value = hex_to_bin(*ns++);
+               if (value >= 0)
+                       j = (j << 4) | value;
+               else
+                       return -EINVAL;
+
+               if (i % 2) {
+                       wwn[i/2] = j & 0xff;
+                       j = 0;
+               }
+       }
+
+       *nm = wwn_to_u64(wwn);
+       return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
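+/*
+ * Example (illustrative values only): the input must be two bare 16-digit
+ * hex WWNs joined by a single colon, e.g.
+ * "2100001b32a90002:2000001b32a90002" yields wwpn 0x2100001b32a90002 and
+ * wwnn 0x2000001b32a90002; a trailing newline is tolerated.
+ */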
+static int tcm_qla2xxx_npiv_parse_wwn(
+       const char *name,
+       size_t count,
+       u64 *wwpn,
+       u64 *wwnn)
+{
+       unsigned int cnt = count;
+       int rc;
+
+       *wwpn = 0;
+       *wwnn = 0;
+
+       /* count may include a LF at end of string */
+       if (name[cnt-1] == '\n')
+               cnt--;
+
+       /* validate we have enough characters for WWPN */
+       if ((cnt != (16+1+16)) || (name[16] != ':'))
+               return -EINVAL;
+
+       rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+       if (rc != 0)
+               return rc;
+
+       rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+       if (rc != 0)
+               return rc;
+
+       return 0;
+}
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+                                       u64 wwpn, u64 wwnn)
+{
+       u8 b[8], b2[8];
+
+       put_unaligned_be64(wwpn, b);
+       put_unaligned_be64(wwnn, b2);
+       return snprintf(buf, len,
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+               b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+               b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+       return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       u8 proto_id;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               proto_id = fc_get_fabric_proto_ident(se_tpg);
+               break;
+       }
+
+       return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+       return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+       return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl,
+       struct t10_pr_registration *pr_reg,
+       int *format_code,
+       unsigned char *buf)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       int ret = 0;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+                                       format_code, buf);
+               break;
+       }
+
+       return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl,
+       struct t10_pr_registration *pr_reg,
+       int *format_code)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       int ret = 0;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+                                       format_code);
+               break;
+       }
+
+       return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+       struct se_portal_group *se_tpg,
+       const char *buf,
+       u32 *out_tid_len,
+       char **port_nexus_ptr)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       char *tid = NULL;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+                                       port_nexus_ptr);
+               break;
+       }
+
+       return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+       struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_nacl *nacl;
+
+       nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+       if (!nacl) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+               return NULL;
+       }
+
+       return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl)
+{
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+                       struct tcm_qla2xxx_nacl, se_node_acl);
+       kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+       struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+                       struct qla_tgt_mgmt_cmd, free_work);
+
+       transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+       queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+       INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/*
+ * tcm_qla2xxx_release_cmd - Callback from TCM Core to release the
+ * underlying fabric descriptor for the @se_cmd being released.
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd;
+
+       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+               struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+                               struct qla_tgt_mgmt_cmd, se_cmd);
+               qlt_free_mcmd(mcmd);
+               return;
+       }
+
+       cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+       qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct scsi_qla_host *vha;
+       unsigned long flags;
+
+       BUG_ON(!sess);
+       vha = sess->vha;
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       sess->tearing_down = 1;
+       target_splice_sess_cmd_list(se_sess);
+       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+       return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct scsi_qla_host *vha;
+       unsigned long flags;
+
+       BUG_ON(!sess);
+       vha = sess->vha;
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       qlt_unreg_sess(sess);
+       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+       return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ).  However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
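+/*
+ * In short: a WRITE (DMA_TO_DEVICE in se_cmd terms) is mapped as
+ * DMA_FROM_DEVICE, a READ (DMA_FROM_DEVICE) is mapped as DMA_TO_DEVICE,
+ * and BIDI commands stay DMA_BIDIRECTIONAL.
+ */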
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+       if (se_cmd->se_cmd_flags & SCF_BIDI)
+               return DMA_BIDIRECTIONAL;
+
+       switch (se_cmd->data_direction) {
+       case DMA_TO_DEVICE:
+               return DMA_FROM_DEVICE;
+       case DMA_FROM_DEVICE:
+               return DMA_TO_DEVICE;
+       case DMA_NONE:
+       default:
+               return DMA_NONE;
+       }
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       cmd->bufflen = se_cmd->data_length;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+       cmd->sg_cnt = se_cmd->t_data_nents;
+       cmd->sg = se_cmd->t_data_sg;
+
+       /*
+        * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+        * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+        */
+       return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+       unsigned long flags;
+       /*
+        * Check for WRITE_PENDING status to determine if we need to wait for
+        * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+        */
+       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+       if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+           se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+               spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+               wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+                                               3000);
+               return 0;
+       }
+       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+       return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+       return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+       return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+       unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+       int data_dir, int bidi)
+{
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       struct se_session *se_sess;
+       struct qla_tgt_sess *sess;
+       int flags = TARGET_SCF_ACK_KREF;
+
+       if (bidi)
+               flags |= TARGET_SCF_BIDI_OP;
+
+       sess = cmd->sess;
+       if (!sess) {
+               pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+               return -EINVAL;
+       }
+
+       se_sess = sess->se_sess;
+       if (!se_sess) {
+               pr_err("Unable to locate active struct se_session\n");
+               return -EINVAL;
+       }
+
+       target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+                               cmd->unpacked_lun, data_length, fcp_task_attr,
+                               data_dir, flags);
+       return 0;
+}
+
+static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       /*
+        * Dispatch ->queue_status from workqueue process context
+        */
+       transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       unsigned long flags;
+       /*
+        * Ensure that the complete FCP WRITE payload has been received.
+        * Otherwise return an exception via CHECK_CONDITION status.
+        */
+       if (!cmd->write_data_transferred) {
+               /*
+                * Check if se_cmd has already been aborted via LUN_RESET, and
+                * waiting upon completion in tcm_qla2xxx_write_pending_status()
+                */
+               spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+               if (se_cmd->transport_state & CMD_T_ABORTED) {
+                       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+                       complete(&se_cmd->t_transport_stop_comp);
+                       return 0;
+               }
+               spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+               se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+               INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+               queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+               return 0;
+       }
+       /*
+        * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+        * status to the backstore processing thread.
+        */
+       return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+       uint8_t tmr_func, uint32_t tag)
+{
+       struct qla_tgt_sess *sess = mcmd->sess;
+       struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+       return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+                       tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       cmd->bufflen = se_cmd->data_length;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+       cmd->sg_cnt = se_cmd->t_data_nents;
+       cmd->sg = se_cmd->t_data_sg;
+       cmd->offset = 0;
+
+       /*
+        * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+        */
+       return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+                               se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+       int xmit_type = QLA_TGT_XMIT_STATUS;
+
+       cmd->bufflen = se_cmd->data_length;
+       cmd->sg = NULL;
+       cmd->sg_cnt = 0;
+       cmd->offset = 0;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+               /*
+                * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+                * for qla_tgt_xmit_response LLD code
+                */
+               se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+               se_cmd->residual_count = se_cmd->data_length;
+
+               cmd->bufflen = 0;
+       }
+       /*
+        * Now queue status response to qla2xxx LLD code and response ring
+        */
+       return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+       struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+       struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+                               struct qla_tgt_mgmt_cmd, se_cmd);
+
+       pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+                       mcmd, se_tmr->function, se_tmr->response);
+       /*
+        * Do translation between TCM TM response codes and
+        * QLA2xxx FC TM response codes.
+        */
+       switch (se_tmr->response) {
+       case TMR_FUNCTION_COMPLETE:
+               mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+               break;
+       case TMR_TASK_DOES_NOT_EXIST:
+               mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+               break;
+       case TMR_FUNCTION_REJECTED:
+               mcmd->fc_tm_rsp = FC_TM_REJECT;
+               break;
+       case TMR_LUN_DOES_NOT_EXIST:
+       default:
+               mcmd->fc_tm_rsp = FC_TM_FAILED;
+               break;
+       }
+       /*
+        * Queue the TM response to QLA2xxx LLD to build a
+        * CTIO response packet.
+        */
+       qlt_xmit_tm_rsp(mcmd);
+
+       return 0;
+}
+
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+       return 0;
+}
+
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+                                       u32 sense_length)
+{
+       return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+                       struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+       struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+       struct se_portal_group *se_tpg = se_nacl->se_tpg;
+       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                               struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+                               struct tcm_qla2xxx_nacl, se_node_acl);
+       void *node;
+
+       pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+       node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+       WARN_ON(node && (node != se_nacl));
+
+       pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+           se_nacl, nacl->nport_wwnn, nacl->nport_id);
+       /*
+        * Now clear the se_nacl and session pointers from our HW lport lookup
+        * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+        *
+        * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+        * target_wait_for_sess_cmds() before the session waits for outstanding
+        * I/O to complete, to avoid a race between session shutdown execution
+        * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+        */
+       tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+       struct se_session *se_sess = container_of(kref,
+                       struct se_session, sess_kref);
+
+       qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct qla_hw_data *ha = sess->vha->hw;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+       tcm_qla2xxx_put_session(sess->se_sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+       tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+       struct se_portal_group *se_tpg,
+       struct config_group *group,
+       const char *name)
+{
+       struct se_node_acl *se_nacl, *se_nacl_new;
+       struct tcm_qla2xxx_nacl *nacl;
+       u64 wwnn;
+       u32 qla2xxx_nexus_depth;
+
+       if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+               return ERR_PTR(-EINVAL);
+
+       se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+       if (!se_nacl_new)
+               return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+       qla2xxx_nexus_depth = 1;
+
+       /*
+        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+        * when converting a NodeACL from demo mode -> explicit
+        */
+       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+                               name, qla2xxx_nexus_depth);
+       if (IS_ERR(se_nacl)) {
+               tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+               return se_nacl;
+       }
+       /*
+        * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+        */
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       nacl->nport_wwnn = wwnn;
+       tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+       return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+       struct se_portal_group *se_tpg = se_acl->se_tpg;
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+                               struct tcm_qla2xxx_nacl, se_node_acl);
+
+       core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+       kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
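+/*
+ * DEF_QLA_TPG_ATTRIB() generates the configfs show/store handlers for a
+ * TPG attribute: show prints the current value, store parses the input
+ * with kstrtoul() and hands it to the matching tcm_qla2xxx_set_attrib_##name()
+ * helper generated by DEF_QLA_TPG_ATTR_BOOL() below.
+ */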
+#define DEF_QLA_TPG_ATTRIB(name)                                       \
+                                                                       \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(                     \
+       struct se_portal_group *se_tpg,                                 \
+       char *page)                                                     \
+{                                                                      \
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,              \
+                       struct tcm_qla2xxx_tpg, se_tpg);                \
+                                                                       \
+       return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);        \
+}                                                                      \
+                                                                       \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(                    \
+       struct se_portal_group *se_tpg,                                 \
+       const char *page,                                               \
+       size_t count)                                                   \
+{                                                                      \
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,              \
+                       struct tcm_qla2xxx_tpg, se_tpg);                \
+       unsigned long val;                                              \
+       int ret;                                                        \
+                                                                       \
+       ret = kstrtoul(page, 0, &val);                                  \
+       if (ret < 0) {                                                  \
+               pr_err("kstrtoul() failed with"                         \
+                               " ret: %d\n", ret);                     \
+               return -EINVAL;                                         \
+       }                                                               \
+       ret = tcm_qla2xxx_set_attrib_##name(tpg, val);                  \
+                                                                       \
+       return (!ret) ? count : -EINVAL;                                \
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name)                                   \
+                                                                       \
+static int tcm_qla2xxx_set_attrib_##_name(                             \
+       struct tcm_qla2xxx_tpg *tpg,                                    \
+       unsigned long val)                                              \
+{                                                                      \
+       struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;            \
+                                                                       \
+       if ((val != 0) && (val != 1)) {                                 \
+               pr_err("Illegal boolean value %lu\n", val);             \
+               return -EINVAL;                                         \
+       }                                                               \
+                                                                       \
+       a->_name = val;                                                 \
+       return 0;                                                       \
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+       TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+       &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+       &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+       &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+       &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+       NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+
+       return snprintf(page, PAGE_SIZE, "%d\n",
+                       atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+       unsigned long op;
+       int rc;
+
+       rc = kstrtoul(page, 0, &op);
+       if (rc < 0) {
+               pr_err("kstrtoul() returned %d\n", rc);
+               return -EINVAL;
+       }
+       if ((op != 1) && (op != 0)) {
+               pr_err("Illegal value for tpg_enable: %lu\n", op);
+               return -EINVAL;
+       }
+
+       if (op) {
+               atomic_set(&tpg->lport_tpg_enabled, 1);
+               qlt_enable_vha(vha);
+       } else {
+               if (!ha->tgt.qla_tgt) {
+                       pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+                       return -ENODEV;
+               }
+               atomic_set(&tpg->lport_tpg_enabled, 0);
+               qlt_stop_phase1(ha->tgt.qla_tgt);
+       }
+
+       return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+       &tcm_qla2xxx_tpg_enable.attr,
+       NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+       struct se_wwn *wwn,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_tpg *tpg;
+       unsigned long tpgt;
+       int ret;
+
+       if (strstr(name, "tpgt_") != name)
+               return ERR_PTR(-EINVAL);
+       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+               return ERR_PTR(-EINVAL);
+
+       if (!lport->qla_npiv_vp && (tpgt != 1)) {
+               pr_err("In non-NPIV mode, a single TPG=1 is used for HW port mappings\n");
+               return ERR_PTR(-ENOSYS);
+       }
+
+       tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+       if (!tpg) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       tpg->lport = lport;
+       tpg->lport_tpgt = tpgt;
+       /*
+        * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+        * NodeACLs
+        */
+       QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+       QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+       QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+       ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       if (ret < 0) {
+               kfree(tpg);
+               return NULL;
+       }
+       /*
+        * Setup local TPG=1 pointer for non NPIV mode.
+        */
+       if (lport->qla_npiv_vp == NULL)
+               lport->tpg_1 = tpg;
+
+       return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       /*
+        * Call into qla_target.c LLD logic to shut down the active
+        * FC Nexuses and disable target mode operation for this qla_hw_data
+        */
+       if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+               qlt_stop_phase1(ha->tgt.qla_tgt);
+
+       core_tpg_deregister(se_tpg);
+       /*
+        * Clear local TPG=1 pointer for non NPIV mode.
+        */
+       if (lport->qla_npiv_vp == NULL)
+               lport->tpg_1 = NULL;
+
+       kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+       struct se_wwn *wwn,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_tpg *tpg;
+       unsigned long tpgt;
+       int ret;
+
+       if (strstr(name, "tpgt_") != name)
+               return ERR_PTR(-EINVAL);
+       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+               return ERR_PTR(-EINVAL);
+
+       tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+       if (!tpg) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       tpg->lport = lport;
+       tpg->lport_tpgt = tpgt;
+
+       ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       if (ret < 0) {
+               kfree(tpg);
+               return NULL;
+       }
+       return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+       scsi_qla_host_t *vha,
+       const uint8_t *s_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_nacl *nacl;
+       u32 key;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return NULL;
+       }
+
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+       se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+       if (!se_nacl) {
+               pr_debug("Unable to locate s_id: 0x%06x\n", key);
+               return NULL;
+       }
+       pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+           se_nacl, se_nacl->initiatorname);
+
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       if (!nacl->qla_tgt_sess) {
+               pr_err("Unable to locate struct qla_tgt_sess\n");
+               return NULL;
+       }
+
+       return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
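+/*
+ * The 24-bit S_ID (domain, area, al_pa) is packed into a btree key, e.g.
+ * s_id {0x01, 0x02, 0x03} becomes key 0x010203.  Depending on whether
+ * new_se_nacl is NULL, the fcport_map entry and the cached
+ * nacl->qla_tgt_sess pointer are set up, replaced or cleared.
+ */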
+static void tcm_qla2xxx_set_sess_by_s_id(
+       struct tcm_qla2xxx_lport *lport,
+       struct se_node_acl *new_se_nacl,
+       struct tcm_qla2xxx_nacl *nacl,
+       struct se_session *se_sess,
+       struct qla_tgt_sess *qla_tgt_sess,
+       uint8_t *s_id)
+{
+       u32 key;
+       void *slot;
+       int rc;
+
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       pr_debug("set_sess_by_s_id: %06x\n", key);
+
+       slot = btree_lookup32(&lport->lport_fcport_map, key);
+       if (!slot) {
+               if (new_se_nacl) {
+                       pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+                       nacl->nport_id = key;
+                       rc = btree_insert32(&lport->lport_fcport_map, key,
+                                       new_se_nacl, GFP_ATOMIC);
+                       if (rc)
+                               printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+                                   (int)key);
+               } else {
+                       pr_debug("Wiping nonexistent fc_port entry\n");
+               }
+
+               qla_tgt_sess->se_sess = se_sess;
+               nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (nacl->qla_tgt_sess) {
+               if (new_se_nacl == NULL) {
+                       pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+                       btree_remove32(&lport->lport_fcport_map, key);
+                       nacl->qla_tgt_sess = NULL;
+                       return;
+               }
+               pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+               btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+               qla_tgt_sess->se_sess = se_sess;
+               nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (new_se_nacl == NULL) {
+               pr_debug("Clearing existing fc_port entry\n");
+               btree_remove32(&lport->lport_fcport_map, key);
+               return;
+       }
+
+       pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+       btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+       qla_tgt_sess->se_sess = se_sess;
+       nacl->qla_tgt_sess = qla_tgt_sess;
+
+       pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+           nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+       scsi_qla_host_t *vha,
+       const uint16_t loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_nacl *nacl;
+       struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return NULL;
+       }
+
+       pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+       fc_loopid = lport->lport_loopid_map + loop_id;
+       se_nacl = fc_loopid->se_nacl;
+       if (!se_nacl) {
+               pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+                   loop_id);
+               return NULL;
+       }
+
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+       if (!nacl->qla_tgt_sess) {
+               pr_err("Unable to locate struct qla_tgt_sess\n");
+               return NULL;
+       }
+
+       return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+       struct tcm_qla2xxx_lport *lport,
+       struct se_node_acl *new_se_nacl,
+       struct tcm_qla2xxx_nacl *nacl,
+       struct se_session *se_sess,
+       struct qla_tgt_sess *qla_tgt_sess,
+       uint16_t loop_id)
+{
+       struct se_node_acl *saved_nacl;
+       struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+       pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+       fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+                       lport->lport_loopid_map)[loop_id];
+
+       saved_nacl = fc_loopid->se_nacl;
+       if (!saved_nacl) {
+               pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+               fc_loopid->se_nacl = new_se_nacl;
+               if (qla_tgt_sess->se_sess != se_sess)
+                       qla_tgt_sess->se_sess = se_sess;
+               if (nacl->qla_tgt_sess != qla_tgt_sess)
+                       nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (nacl->qla_tgt_sess) {
+               if (new_se_nacl == NULL) {
+                       pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+                       fc_loopid->se_nacl = NULL;
+                       nacl->qla_tgt_sess = NULL;
+                       return;
+               }
+
+               pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+               fc_loopid->se_nacl = new_se_nacl;
+               if (qla_tgt_sess->se_sess != se_sess)
+                       qla_tgt_sess->se_sess = se_sess;
+               if (nacl->qla_tgt_sess != qla_tgt_sess)
+                       nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (new_se_nacl == NULL) {
+               pr_debug("Clearing fc_loopid->se_nacl\n");
+               fc_loopid->se_nacl = NULL;
+               return;
+       }
+
+       pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+       fc_loopid->se_nacl = new_se_nacl;
+       if (qla_tgt_sess->se_sess != se_sess)
+               qla_tgt_sess->se_sess = se_sess;
+       if (nacl->qla_tgt_sess != qla_tgt_sess)
+               nacl->qla_tgt_sess = qla_tgt_sess;
+
+       pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+           nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+               struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+       struct se_session *se_sess = sess->se_sess;
+       unsigned char be_sid[3];
+
+       be_sid[0] = sess->s_id.b.domain;
+       be_sid[1] = sess->s_id.b.area;
+       be_sid[2] = sess->s_id.b.al_pa;
+
+       tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+                               sess, be_sid);
+       tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+                               sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+       struct qla_tgt *tgt = sess->tgt;
+       struct qla_hw_data *ha = tgt->ha;
+       struct se_session *se_sess;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_lport *lport;
+       struct tcm_qla2xxx_nacl *nacl;
+
+       BUG_ON(in_interrupt());
+
+       se_sess = sess->se_sess;
+       if (!se_sess) {
+               pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+               dump_stack();
+               return;
+       }
+       se_nacl = se_sess->se_node_acl;
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return;
+       }
+       target_wait_for_sess_cmds(se_sess, 0);
+
+       transport_deregister_session_configfs(sess->se_sess);
+       transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+       scsi_qla_host_t *vha,
+       unsigned char *fc_wwpn,
+       void *qla_tgt_sess,
+       uint8_t *s_id,
+       uint16_t loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct tcm_qla2xxx_tpg *tpg;
+       struct tcm_qla2xxx_nacl *nacl;
+       struct se_portal_group *se_tpg;
+       struct se_node_acl *se_nacl;
+       struct se_session *se_sess;
+       struct qla_tgt_sess *sess = qla_tgt_sess;
+       unsigned char port_name[36];
+       unsigned long flags;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return -EINVAL;
+       }
+       /*
+        * Locate the TPG=1 reference.
+        */
+       tpg = lport->tpg_1;
+       if (!tpg) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
+               return -EINVAL;
+       }
+       se_tpg = &tpg->se_tpg;
+
+       se_sess = transport_init_session();
+       if (IS_ERR(se_sess)) {
+               pr_err("Unable to initialize struct se_session\n");
+               return PTR_ERR(se_sess);
+       }
+       /*
+        * Format the FCP Initiator port_name into colon separated values to
+        * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+        */
+       memset(&port_name, 0, 36);
+       snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+               fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+               fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+       /*
+        * Locate our struct se_node_acl either from an explicit NodeACL created
+        * via ConfigFS, or via running in TPG demo mode.
+        */
+       se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+                                       port_name);
+       if (!se_sess->se_node_acl) {
+               transport_free_session(se_sess);
+               return -EINVAL;
+       }
+       se_nacl = se_sess->se_node_acl;
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       /*
+        * And now setup the new se_nacl and session pointers into our HW lport
+        * mappings for fabric S_ID and LOOP_ID.
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+                       qla_tgt_sess, s_id);
+       tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+                       qla_tgt_sess, loop_id);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       /*
+        * Finally register the new FC Nexus with TCM
+        */
+       __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+       return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+       .handle_cmd             = tcm_qla2xxx_handle_cmd,
+       .handle_data            = tcm_qla2xxx_handle_data,
+       .handle_tmr             = tcm_qla2xxx_handle_tmr,
+       .free_cmd               = tcm_qla2xxx_free_cmd,
+       .free_mcmd              = tcm_qla2xxx_free_mcmd,
+       .free_session           = tcm_qla2xxx_free_session,
+       .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+       .find_sess_by_s_id      = tcm_qla2xxx_find_sess_by_s_id,
+       .find_sess_by_loop_id   = tcm_qla2xxx_find_sess_by_loop_id,
+       .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+       .put_sess               = tcm_qla2xxx_put_sess,
+       .shutdown_sess          = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+       int rc;
+
+       rc = btree_init32(&lport->lport_fcport_map);
+       if (rc) {
+               pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+               return rc;
+       }
+
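+       /*
+        * One slot per possible 16-bit FC loop ID (65536 entries); the
+        * table is large enough that vmalloc is used instead of kmalloc.
+        */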
+       lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+                               65536);
+       if (!lport->lport_loopid_map) {
+               pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+                   sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+               btree_destroy32(&lport->lport_fcport_map);
+               return -ENOMEM;
+       }
+       memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+              * 65536);
+       pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+              sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+       return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       /*
+        * Setup local pointer to vha, NPIV VP pointer (if present) and
+        * vha->tcm_lport pointer
+        */
+       lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+       lport->qla_vha = vha;
+
+       return 0;
+}
+
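+/*
+ * Called by TCM when userspace creates a WWPN directory under configfs,
+ * e.g. (path and WWPN below are illustrative):
+ *
+ *   /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:04:85/
+ *
+ * The colon-formatted name is parsed into a binary WWPN and the new lport
+ * is registered with the qla2xxx LLD via qlt_lport_register().
+ */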
+static struct se_wwn *tcm_qla2xxx_make_lport(
+       struct target_fabric_configfs *tf,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport;
+       u64 wwpn;
+       int ret = -ENODEV;
+
+       if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+               return ERR_PTR(-EINVAL);
+
+       lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+       if (!lport) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       lport->lport_wwpn = wwpn;
+       tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+                               wwpn);
+
+       ret = tcm_qla2xxx_init_lport(lport);
+       if (ret != 0)
+               goto out;
+
+       ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+                               tcm_qla2xxx_lport_register_cb, lport);
+       if (ret != 0)
+               goto out_lport;
+
+       return &lport->lport_wwn;
+out_lport:
+       vfree(lport->lport_loopid_map);
+       btree_destroy32(&lport->lport_fcport_map);
+out:
+       kfree(lport);
+       return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct se_node_acl *node;
+       u32 key = 0;
+
+       /*
+        * Call into qla2x_target.c LLD logic to complete the
+        * shutdown of struct qla_tgt after the call to
+        * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
+        */
+       if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+               qlt_stop_phase2(ha->tgt.qla_tgt);
+
+       qlt_lport_deregister(vha);
+
+       vfree(lport->lport_loopid_map);
+       btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+               btree_remove32(&lport->lport_fcport_map, key);
+       btree_destroy32(&lport->lport_fcport_map);
+       kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+       struct target_fabric_configfs *tf,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport;
+       u64 npiv_wwpn, npiv_wwnn;
+       int ret;
+
+       if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+                               &npiv_wwpn, &npiv_wwnn) < 0)
+               return ERR_PTR(-EINVAL);
+
+       lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+       if (!lport) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       lport->lport_npiv_wwpn = npiv_wwpn;
+       lport->lport_npiv_wwnn = npiv_wwnn;
+       tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+                       TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+       ret = -ENOSYS;
+       if (ret != 0)
+               goto out;
+
+       return &lport->lport_wwn;
+out:
+       kfree(lport);
+       return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct Scsi_Host *sh = vha->host;
+       /*
+        * Notify libfc that we want to release the lport->npiv_vport
+        */
+       fc_vport_terminate(lport->npiv_vport);
+
+       scsi_host_put(sh);
+       kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+       struct target_fabric_configfs *tf,
+       char *page)
+{
+       return sprintf(page,
+           "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+       &tcm_qla2xxx_wwn_version.attr,
+       NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+       .get_fabric_name                = tcm_qla2xxx_get_fabric_name,
+       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
+       .tpg_get_wwn                    = tcm_qla2xxx_get_fabric_wwn,
+       .tpg_get_tag                    = tcm_qla2xxx_get_tag,
+       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
+       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = tcm_qla2xxx_check_demo_mode,
+       .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_demo_mode_cache,
+       .tpg_check_demo_mode_write_protect =
+                                       tcm_qla2xxx_check_demo_write_protect,
+       .tpg_check_prod_mode_write_protect =
+                                       tcm_qla2xxx_check_prod_write_protect,
+       .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
+       .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
+       .new_cmd_map                    = NULL,
+       .check_stop_free                = tcm_qla2xxx_check_stop_free,
+       .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .put_session                    = tcm_qla2xxx_put_session,
+       .shutdown_session               = tcm_qla2xxx_shutdown_session,
+       .close_session                  = tcm_qla2xxx_close_session,
+       .sess_get_index                 = tcm_qla2xxx_sess_get_index,
+       .sess_get_initiator_sid         = NULL,
+       .write_pending                  = tcm_qla2xxx_write_pending,
+       .write_pending_status           = tcm_qla2xxx_write_pending_status,
+       .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
+       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
+       .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .get_fabric_sense_len           = tcm_qla2xxx_get_fabric_sense_len,
+       .set_fabric_sense_len           = tcm_qla2xxx_set_fabric_sense_len,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+        */
+       .fabric_make_wwn                = tcm_qla2xxx_make_lport,
+       .fabric_drop_wwn                = tcm_qla2xxx_drop_lport,
+       .fabric_make_tpg                = tcm_qla2xxx_make_tpg,
+       .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
+       .fabric_post_link               = NULL,
+       .fabric_pre_unlink              = NULL,
+       .fabric_make_np                 = NULL,
+       .fabric_drop_np                 = NULL,
+       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
+       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+       .get_fabric_name                = tcm_qla2xxx_npiv_get_fabric_name,
+       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
+       .tpg_get_wwn                    = tcm_qla2xxx_npiv_get_fabric_wwn,
+       .tpg_get_tag                    = tcm_qla2xxx_get_tag,
+       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
+       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = tcm_qla2xxx_check_false,
+       .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_true,
+       .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+       .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+       .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
+       .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
+       .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .put_session                    = tcm_qla2xxx_put_session,
+       .shutdown_session               = tcm_qla2xxx_shutdown_session,
+       .close_session                  = tcm_qla2xxx_close_session,
+       .sess_get_index                 = tcm_qla2xxx_sess_get_index,
+       .sess_get_initiator_sid         = NULL,
+       .write_pending                  = tcm_qla2xxx_write_pending,
+       .write_pending_status           = tcm_qla2xxx_write_pending_status,
+       .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
+       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
+       .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .get_fabric_sense_len           = tcm_qla2xxx_get_fabric_sense_len,
+       .set_fabric_sense_len           = tcm_qla2xxx_set_fabric_sense_len,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+        */
+       .fabric_make_wwn                = tcm_qla2xxx_npiv_make_lport,
+       .fabric_drop_wwn                = tcm_qla2xxx_npiv_drop_lport,
+       .fabric_make_tpg                = tcm_qla2xxx_npiv_make_tpg,
+       .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
+       .fabric_post_link               = NULL,
+       .fabric_pre_unlink              = NULL,
+       .fabric_make_np                 = NULL,
+       .fabric_drop_np                 = NULL,
+       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
+       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+       struct target_fabric_configfs *fabric, *npiv_fabric;
+       int ret;
+
+       pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           utsname()->machine);
+       /*
+        * Register the top level struct config_item_type with TCM core
+        */
+       fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+       if (IS_ERR(fabric)) {
+               pr_err("target_fabric_configfs_init() failed\n");
+               return PTR_ERR(fabric);
+       }
+       /*
+        * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+        */
+       fabric->tf_ops = tcm_qla2xxx_ops;
+       /*
+        * Setup default attribute lists for various fabric->tf_cit_tmpl
+        */
+       TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+                                               tcm_qla2xxx_tpg_attrib_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       /*
+        * Register the fabric for use within TCM
+        */
+       ret = target_fabric_configfs_register(fabric);
+       if (ret < 0) {
+               pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+               return ret;
+       }
+       /*
+        * Setup our local pointer to *fabric
+        */
+       tcm_qla2xxx_fabric_configfs = fabric;
+       pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+       /*
+        * Register the top level struct config_item_type for NPIV with TCM core
+        */
+       npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+       if (IS_ERR(npiv_fabric)) {
+               pr_err("target_fabric_configfs_init() failed\n");
+               ret = PTR_ERR(npiv_fabric);
+               goto out_fabric;
+       }
+       /*
+        * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+        */
+       npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+       /*
+        * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+        */
+       TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       /*
+        * Register the npiv_fabric for use within TCM
+        */
+       ret = target_fabric_configfs_register(npiv_fabric);
+       if (ret < 0) {
+               pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+               goto out_fabric;
+       }
+       /*
+        * Setup our local pointer to *npiv_fabric
+        */
+       tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+       pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+       tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+                                               WQ_MEM_RECLAIM, 0);
+       if (!tcm_qla2xxx_free_wq) {
+               ret = -ENOMEM;
+               goto out_fabric_npiv;
+       }
+
+       tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+       if (!tcm_qla2xxx_cmd_wq) {
+               ret = -ENOMEM;
+               goto out_free_wq;
+       }
+
+       return 0;
+
+out_free_wq:
+       destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+       target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+       target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+       return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+       destroy_workqueue(tcm_qla2xxx_cmd_wq);
+       destroy_workqueue(tcm_qla2xxx_free_wq);
+
+       target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+       tcm_qla2xxx_fabric_configfs = NULL;
+       pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+       target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+       tcm_qla2xxx_npiv_fabric_configfs = NULL;
+       pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+       int ret;
+
+       ret = tcm_qla2xxx_register_configfs();
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+       tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644 (file)
index 0000000..8254981
--- /dev/null
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION    "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN    32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+       /* From libfc struct fc_rport->port_id */
+       u32 nport_id;
+       /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+       u64 nport_wwnn;
+       /* ASCII formatted WWPN for FC Initiator Nport */
+       char nport_name[TCM_QLA2XXX_NAMELEN];
+       /* Pointer to qla_tgt_sess */
+       struct qla_tgt_sess *qla_tgt_sess;
+       /* Pointer to TCM FC nexus */
+       struct se_session *nport_nexus;
+       /* Returned by tcm_qla2xxx_make_nodeacl() */
+       struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+       int generate_node_acls;
+       int cache_dynamic_acls;
+       int demo_mode_write_protect;
+       int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+       /* FC lport target portal group tag for TCM */
+       u16 lport_tpgt;
+       /* Atomic bit to determine TPG active status */
+       atomic_t lport_tpg_enabled;
+       /* Pointer back to tcm_qla2xxx_lport */
+       struct tcm_qla2xxx_lport *lport;
+       /* Used by tcm_qla2xxx_tpg_attrib_cit */
+       struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+       /* Returned by tcm_qla2xxx_make_tpg() */
+       struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg)    (&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+       struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+       /* SCSI protocol the lport is providing */
+       u8 lport_proto_id;
+       /* Binary World Wide unique Port Name for FC Target Lport */
+       u64 lport_wwpn;
+       /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+       u64 lport_npiv_wwpn;
+       /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+       u64 lport_npiv_wwnn;
+       /* ASCII formatted WWPN for FC Target Lport */
+       char lport_name[TCM_QLA2XXX_NAMELEN];
+       /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+       char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+       /* map for fc_port pointers in 24-bit FC Port ID space */
+       struct btree_head32 lport_fcport_map;
+       /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+       struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+       /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+       struct scsi_qla_host *qla_vha;
+       /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+       struct scsi_qla_host *qla_npiv_vp;
+       /* Pointer to struct qla_tgt pointer */
+       struct qla_tgt lport_qla_tgt;
+       /* Pointer to struct fc_vport for NPIV vport from libfc */
+       struct fc_vport *npiv_vport;
+       /* Pointer to TPG=1 for non NPIV mode */
+       struct tcm_qla2xxx_tpg *tpg_1;
+       /* Returned by tcm_qla2xxx_make_lport() */
+       struct se_wwn lport_wwn;
+};
index 0b0a7d42137d7b3fef646ec376175d9c7a97454e..c681b2a355e137a99edcfd39e1c18b8c7dcbda11 100644 (file)
@@ -9,6 +9,140 @@
 #include "ql4_glbl.h"
 #include "ql4_dbg.h"
 
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+                            struct bin_attribute *ba, char *buf, loff_t off,
+                            size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+                                              struct device, kobj)));
+
+       if (!is_qla8022(ha))
+               return -EINVAL;
+
+       if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+               return 0;
+
+       return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+                                      ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+                             struct bin_attribute *ba, char *buf, loff_t off,
+                             size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+                                              struct device, kobj)));
+       uint32_t dev_state;
+       long reading;
+       int ret = 0;
+
+       if (!is_qla8022(ha))
+               return -EINVAL;
+
+       if (off != 0)
+               return ret;
+
+       buf[1] = 0;
+       ret = kstrtol(buf, 10, &reading);
+       if (ret) {
+               ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+                          __func__, ret);
+               return ret;
+       }
+
+       switch (reading) {
+       case 0:
+               /* clear dump collection flags */
+               if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+                       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+                       /* Reload minidump template */
+                       qla4xxx_alloc_fw_dump(ha);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Firmware template reloaded\n"));
+               }
+               break;
+       case 1:
+               /* Set flag to read dump */
+               if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+                   !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+                       set_bit(AF_82XX_DUMP_READING, &ha->flags);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Raw firmware dump ready for read on (%ld).\n",
+                                         ha->host_no));
+               }
+               break;
+       case 2:
+               /* Reset HBA */
+               qla4_8xxx_idc_lock(ha);
+               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               if (dev_state == QLA82XX_DEV_READY) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: Setting Need reset, reset_owner is 0x%x.\n",
+                                  __func__, ha->func_num);
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+                                       QLA82XX_DEV_NEED_RESET);
+                       set_bit(AF_82XX_RST_OWNER, &ha->flags);
+               } else
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: Reset not performed as device state is 0x%x\n",
+                                  __func__, dev_state);
+
+               qla4_8xxx_idc_unlock(ha);
+               break;
+       default:
+               /* do nothing */
+               break;
+       }
+
+       return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+       .attr = {
+               .name = "fw_dump",
+               .mode = S_IRUSR | S_IWUSR,
+       },
+       .size = 0,
+       .read = qla4_8xxx_sysfs_read_fw_dump,
+       .write = qla4_8xxx_sysfs_write_fw_dump,
+};
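+/*
+ * fw_dump interface summary (see the handlers above): writing '0' clears
+ * the dump-collection flags and reloads the minidump template, writing '1'
+ * marks a previously captured dump as ready for reading, writing '2' moves
+ * a ready device to the NEED_RESET state, and a read returns the raw
+ * firmware dump buffer.
+ */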
+
+static struct sysfs_entry {
+       char *name;
+       struct bin_attribute *attr;
+} bin_file_entries[] = {
+       { "fw_dump", &sysfs_fw_dump_attr },
+       { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+       struct Scsi_Host *host = ha->host;
+       struct sysfs_entry *iter;
+       int ret;
+
+       for (iter = bin_file_entries; iter->name; iter++) {
+               ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+                                           iter->attr);
+               if (ret)
+                       ql4_printk(KERN_ERR, ha,
+                                  "Unable to create sysfs %s binary attribute (%d).\n",
+                                  iter->name, ret);
+       }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+       struct Scsi_Host *host = ha->host;
+       struct sysfs_entry *iter;
+
+       for (iter = bin_file_entries; iter->name; iter++)
+               sysfs_remove_bin_file(&host->shost_gendev.kobj,
+                                     iter->attr);
+}
+
 /* Scsi_Host attributes. */
 static ssize_t
 qla4xxx_fw_version_show(struct device *dev,
index 7f2492e88be72085325bbb58debacefe9aaf288c..96a5616a8fdaa6f7bbb35140befe25a0c88e7087 100644 (file)
@@ -398,6 +398,16 @@ struct isp_operations {
        int (*get_sys_info) (struct scsi_qla_host *);
 };
 
+struct ql4_mdump_size_table {
+       uint32_t size;
+       uint32_t size_cmask_02;
+       uint32_t size_cmask_04;
+       uint32_t size_cmask_08;
+       uint32_t size_cmask_10;
+       uint32_t size_cmask_FF;
+       uint32_t version;
+};
+
 /*qla4xxx ipaddress configuration details */
 struct ipaddress_config {
        uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
 #define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED              24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER              25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING           26 /* 0x04000000 */
+
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
 
        uint32_t nx_dev_init_timeout;
        uint32_t nx_reset_timeout;
+       void *fw_dump;
+       uint32_t fw_dump_size;
+       uint32_t fw_dump_capture_mask;
+       void *fw_dump_tmplt_hdr;
+       uint32_t fw_dump_tmplt_size;
 
        struct completion mbx_intr_comp;
 
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 #define PROCESS_ALL_AENS        0
 #define FLUSH_DDB_CHANGED_AENS  1
 
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP                0
+
 #endif /*_QLA4XXX_H */
index 210cd1d64475b0b3cfc0128ee262565ec7533f2c..7240948fb929bcb557398ecd774fe9fc36c7fae3 100644 (file)
@@ -385,6 +385,11 @@ struct qla_flt_region {
 #define MBOX_CMD_GET_IP_ADDR_STATE             0x0091
 #define MBOX_CMD_SEND_IPV6_ROUTER_SOL          0x0092
 #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR  0x0093
+#define MBOX_CMD_MINIDUMP                      0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND           0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND          0x01
 
 /* Mailbox 1 */
 #define FW_STATE_READY                         0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
        uint8_t reserved2[264]; /* 0x0308 - 0x040F */
 };
 
+#define QLA82XX_DBG_STATE_ARRAY_LEN            16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN         8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN             8
+
+struct qla4_8xxx_minidump_template_hdr {
+       uint32_t entry_type;
+       uint32_t first_entry_offset;
+       uint32_t size_of_template;
+       uint32_t capture_debug_level;
+       uint32_t num_of_entries;
+       uint32_t version;
+       uint32_t driver_timestamp;
+       uint32_t checksum;
+
+       uint32_t driver_capture_mask;
+       uint32_t driver_info_word2;
+       uint32_t driver_info_word3;
+       uint32_t driver_info_word4;
+
+       uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+       uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
 #endif /*  _QLA4X_FW_H */
index 910536667cf577e1c19d7db82803944730eefbca..20b49d019043d193c50aebd9aef311cfa9571271 100644 (file)
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
 void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+                                 dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
 
 extern struct device_attribute *qla4xxx_host_attrs[];
 #endif /* _QLA4x_GBL_H */
index 90ee5d8fa7315eda08a8bdc9cd8b66aff2e82eb2..bf36723b84e10cff0a01a3925f5d5bcafaa27fb9 100644 (file)
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
        return ipv4_wait|ipv6_wait;
 }
 
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+       int status;
+       uint32_t capture_debug_level;
+       int hdr_entry_bit, k;
+       void *md_tmp;
+       dma_addr_t md_tmp_dma;
+       struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+       if (ha->fw_dump) {
+               ql4_printk(KERN_WARNING, ha,
+                          "Firmware dump previously allocated.\n");
+               return;
+       }
+
+       status = qla4xxx_req_template_size(ha);
+       if (status != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi%ld: Failed to get template size\n",
+                          ha->host_no);
+               return;
+       }
+
+       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+       /* Allocate memory for saving the template */
+       md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+                                   &md_tmp_dma, GFP_KERNEL);
+       if (!md_tmp)
+               return;
+
+       /* Request template */
+       status =  qla4xxx_get_minidump_template(ha, md_tmp_dma);
+       if (status != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi%ld: Failed to get minidump template\n",
+                          ha->host_no);
+               goto alloc_cleanup;
+       }
+
+       md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+       capture_debug_level = md_hdr->capture_debug_level;
+
+       /* Get capture mask based on module loadtime setting. */
+       if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+               ha->fw_dump_capture_mask = ql4xmdcapmask;
+       else
+               ha->fw_dump_capture_mask = capture_debug_level;
+
+       md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+                         md_hdr->num_of_entries));
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size  = %d\n",
+                         ha->fw_dump_tmplt_size));
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+                         ha->fw_dump_capture_mask));
+
+       /* Calculate fw_dump_size */
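+       /*
+        * Walk capture-mask bits 0x02..0x80 and, for every bit set in the
+        * selected mask, add the per-region size reported in the template's
+        * capture_size_array[].
+        */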
+       for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+            hdr_entry_bit <<= 1, k++) {
+               if (hdr_entry_bit & ha->fw_dump_capture_mask)
+                       ha->fw_dump_size += md_hdr->capture_size_array[k];
+       }
+
+       /* Total firmware dump size including command header */
+       ha->fw_dump_size += ha->fw_dump_tmplt_size;
+       ha->fw_dump = vmalloc(ha->fw_dump_size);
+       if (!ha->fw_dump)
+               goto alloc_cleanup;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Minidump Template Size = 0x%x KB\n",
+                         ha->fw_dump_tmplt_size));
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+       memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+       ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+       dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+                         md_tmp, md_tmp_dma);
+}
+
 static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
 {
        uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
                              "control block\n", ha->host_no, __func__));
                return status;
        }
+
        if (!qla4xxx_fw_ready(ha))
                return status;
 
+       if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+               qla4xxx_alloc_fw_dump(ha);
+
        return qla4xxx_get_firmware_status(ha);
 }
 
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        status = QLA_SUCCESS;
                        break;
                case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                }
                break;
        case DDB_DS_SESSION_ACTIVE:
+       case DDB_DS_DISCOVERY:
                switch (state) {
                case DDB_DS_SESSION_FAILED:
                        /*
index 7ac21dabbf22fce08264314ba3a212a329a1c40e..cab8f665a41faca343dba8f404e01ba96075abaf 100644 (file)
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                }
        }
 
-       if (is_qla8022(ha)) {
-               if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
-                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
-                           "prematurely completing mbx cmd as firmware "
-                           "recovery detected\n", ha->host_no, __func__));
-                       return status;
-               }
-               /* Do not send any mbx cmd if h/w is in failed state*/
-               qla4_8xxx_idc_lock(ha);
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               qla4_8xxx_idc_unlock(ha);
-               if (dev_state == QLA82XX_DEV_FAILED) {
-                       ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
-                           "failed state, do not send any mailbox commands\n",
-                           ha->host_no, __func__);
-                       return status;
-               }
-       }
-
        if ((is_aer_supported(ha)) &&
            (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                msleep(10);
        }
 
+       if (is_qla8022(ha)) {
+               if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                         "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+                                         ha->host_no, __func__));
+                       goto mbox_exit;
+               }
+               /* Do not send any mbx cmd if h/w is in failed state*/
+               qla4_8xxx_idc_lock(ha);
+               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               qla4_8xxx_idc_unlock(ha);
+               if (dev_state == QLA82XX_DEV_FAILED) {
+                       ql4_printk(KERN_WARNING, ha,
+                                  "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+                                  ha->host_no, __func__);
+                       goto mbox_exit;
+               }
+       }
+
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
        ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
        return status;
 }
 
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+                                 dma_addr_t phys_addr)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       int status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+       mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+       mbox_cmd[2] = LSDW(phys_addr);
+       mbox_cmd[3] = MSDW(phys_addr);
+       mbox_cmd[4] = ha->fw_dump_tmplt_size;
+       mbox_cmd[5] = 0;
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+                                        &mbox_sts[0]);
+       if (status != QLA_SUCCESS) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+                                 ha->host_no, __func__, mbox_cmd[0],
+                                 mbox_sts[0], mbox_sts[1]));
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       int status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+       mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+                                        &mbox_sts[0]);
+       if (status == QLA_SUCCESS) {
+               ha->fw_dump_tmplt_size = mbox_sts[1];
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: sts[0]=0x%04x, template  size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+                                 __func__, mbox_sts[0], mbox_sts[1],
+                                 mbox_sts[2], mbox_sts[3], mbox_sts[4],
+                                 mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+               if (ha->fw_dump_tmplt_size == 0)
+                       status = QLA_ERROR;
+       } else {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+                          __func__, mbox_sts[0], mbox_sts[1]);
+               status = QLA_ERROR;
+       }
+
+       return status;
+}
+
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
 {
        set_bit(AF_FW_RECOVERY, &ha->flags);
index e1e46b6dac754e8bb10d9f6523b75e9ef38c2ea3..228b67020d2cde7549e9d962f0b1e95b0675155c 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/ratelimit.h>
 #include "ql4_def.h"
 #include "ql4_glbl.h"
 
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
        return data;
 }
 
+/* Minidump related functions */
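+/*
+ * Indirect CRB access used while capturing the minidump: the upper 16 bits
+ * of 'off' select a 64K window through CRB_WINDOW_2M and the lower 16 bits
+ * index into CRB_INDIRECT_2M; 'flag' selects write (non-zero) or read.
+ */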
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+                             u32 data, uint8_t flag)
+{
+       uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+       off_value  = off & 0xFFFF0000;
+       writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+       /* Read back value to make sure write has gone through before trying
+        * to use it.
+        */
+       win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+       if (win_read != off_value) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+                                  __func__, off_value, win_read, off));
+               return QLA_ERROR;
+       }
+
+       off_value  = off & 0x0000FFFF;
+
+       if (flag)
+               writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+                                             ha->nx_pcibase));
+       else
+               rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+                                             ha->nx_pcibase));
+
+       return rval;
+}
+
 #define CRB_WIN_LOCK_TIMEOUT 100000000
 
 int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
                }
 
                if (j >= MAX_CTL_CHECK) {
-                       if (printk_ratelimit())
-                               ql4_printk(KERN_ERR, ha,
-                                   "failed to read through agent\n");
+                       printk_ratelimited(KERN_ERR
+                                          "%s: failed to read through agent\n",
+                                          __func__);
                        break;
                }
 
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
                if (j >= MAX_CTL_CHECK) {
                        if (printk_ratelimit())
                                ql4_printk(KERN_ERR, ha,
-                                   "failed to write through agent\n");
+                                          "%s: failed to write through agent\n",
+                                          __func__);
                        ret = -1;
                        break;
                }
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
 
        drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        drv_active |= (1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+                  __func__, ha->host_no, drv_active);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
 
        drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        drv_active &= ~(1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+                  __func__, ha->host_no, drv_active);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
 
        drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_state |= (1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+                  __func__, ha->host_no, drv_state);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
 
        drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_state &= ~(1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+                  __func__, ha->host_no, drv_state);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
        qla4_8xxx_rom_unlock(ha);
 }
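+/*
+ * RDCRB template entry: dump 'op_count' CRB registers starting at 'addr',
+ * advancing by 'addr_stride' and storing address/value pairs in the dump.
+ */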
 
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_crb *crb_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+       r_addr = crb_hdr->addr;
+       r_stride = crb_hdr->crb_strd.addr_stride;
+       loop_cnt = crb_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               *data_ptr++ = cpu_to_le32(r_addr);
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += r_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
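+/*
+ * L2 cache template entry: for each tag value program the tag register,
+ * optionally kick the control register and poll until the poll mask
+ * clears (or the wait expires), then read out 'read_addr_cnt' words.
+ */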
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+                                struct qla82xx_minidump_entry_hdr *entry_hdr,
+                                uint32_t **d_ptr)
+{
+       uint32_t addr, r_addr, c_addr, t_r_addr;
+       uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+       unsigned long p_wait, w_time, p_mask;
+       uint32_t c_value_w, c_value_r;
+       struct qla82xx_minidump_entry_cache *cache_hdr;
+       int rval = QLA_ERROR;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+       loop_count = cache_hdr->op_count;
+       r_addr = cache_hdr->read_addr;
+       c_addr = cache_hdr->control_addr;
+       c_value_w = cache_hdr->cache_ctrl.write_value;
+
+       t_r_addr = cache_hdr->tag_reg_addr;
+       t_value = cache_hdr->addr_ctrl.init_tag_value;
+       r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+       p_wait = cache_hdr->cache_ctrl.poll_wait;
+       p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+       for (i = 0; i < loop_count; i++) {
+               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+               if (c_value_w)
+                       qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+               if (p_mask) {
+                       w_time = jiffies + p_wait;
+                       do {
+                               c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+                                                               0, 0);
+                               if ((c_value_r & p_mask) == 0) {
+                                       break;
+                               } else if (time_after_eq(jiffies, w_time)) {
+                                       /* capturing dump failed */
+                                       return rval;
+                               }
+                       } while (1);
+               }
+
+               addr = r_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       addr += cache_hdr->read_ctrl.read_addr_stride;
+               }
+
+               t_value += cache_hdr->addr_ctrl.tag_value_stride;
+       }
+       *d_ptr = data_ptr;
+       return QLA_SUCCESS;
+}
+
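+/*
+ * CONTROL template entry: interpret a small opcode stream against CRB
+ * space - write, read-modify-write, AND/OR, poll for a value, and save or
+ * restore state through the template header's saved_state_array[].
+ */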
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+       struct qla82xx_minidump_entry_crb *crb_entry;
+       uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+       uint32_t crb_addr;
+       unsigned long wtime;
+       struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+       int i;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+                                               ha->fw_dump_tmplt_hdr;
+       crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
+       crb_addr = crb_entry->addr;
+       for (i = 0; i < crb_entry->op_count; i++) {
+               opcode = crb_entry->crb_ctrl.opcode;
+               if (opcode & QLA82XX_DBG_OPCODE_WR) {
+                       qla4_8xxx_md_rw_32(ha, crb_addr,
+                                          crb_entry->value_1, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_WR;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_RW) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_RW;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_AND) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       read_value &= crb_entry->value_2;
+                       opcode &= ~QLA82XX_DBG_OPCODE_AND;
+                       if (opcode & QLA82XX_DBG_OPCODE_OR) {
+                               read_value |= crb_entry->value_3;
+                               opcode &= ~QLA82XX_DBG_OPCODE_OR;
+                       }
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_OR) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       read_value |= crb_entry->value_3;
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_OR;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+                       poll_time = crb_entry->crb_strd.poll_timeout;
+                       wtime = jiffies + poll_time;
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+                       do {
+                               if ((read_value & crb_entry->value_2) ==
+                                   crb_entry->value_1)
+                                       break;
+                               else if (time_after_eq(jiffies, wtime)) {
+                                       /* capturing dump failed */
+                                       rval = QLA_ERROR;
+                                       break;
+                               } else
+                                       read_value = qla4_8xxx_md_rw_32(ha,
+                                                               crb_addr, 0, 0);
+                       } while (1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+                       if (crb_entry->crb_strd.state_index_a) {
+                               index = crb_entry->crb_strd.state_index_a;
+                               addr = tmplt_hdr->saved_state_array[index];
+                       } else {
+                               addr = crb_addr;
+                       }
+
+                       read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       index = crb_entry->crb_ctrl.state_index_v;
+                       tmplt_hdr->saved_state_array[index] = read_value;
+                       opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+                       if (crb_entry->crb_strd.state_index_a) {
+                               index = crb_entry->crb_strd.state_index_a;
+                               addr = tmplt_hdr->saved_state_array[index];
+                       } else {
+                               addr = crb_addr;
+                       }
+
+                       if (crb_entry->crb_ctrl.state_index_v) {
+                               index = crb_entry->crb_ctrl.state_index_v;
+                               read_value =
+                                       tmplt_hdr->saved_state_array[index];
+                       } else {
+                               read_value = crb_entry->value_1;
+                       }
+
+                       qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+                       index = crb_entry->crb_ctrl.state_index_v;
+                       read_value = tmplt_hdr->saved_state_array[index];
+                       read_value <<= crb_entry->crb_ctrl.shl;
+                       read_value >>= crb_entry->crb_ctrl.shr;
+                       if (crb_entry->value_2)
+                               read_value &= crb_entry->value_2;
+                       read_value |= crb_entry->value_3;
+                       read_value += crb_entry->value_1;
+                       tmplt_hdr->saved_state_array[index] = read_value;
+                       opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+               }
+               crb_addr += crb_entry->crb_strd.addr_stride;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+       return rval;
+}
+
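+/*
+ * RDOCM template entry: read on-chip memory directly through the mapped
+ * PCI BAR (ha->nx_pcibase) rather than through the CRB window.
+ */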
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+       r_addr = ocm_hdr->read_addr;
+       r_stride = ocm_hdr->read_addr_stride;
+       loop_cnt = ocm_hdr->op_count;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+                         __func__, r_addr, r_stride, loop_cnt));
+
+       for (i = 0; i < loop_cnt; i++) {
+               r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += r_stride;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+                         __func__, (loop_cnt * sizeof(uint32_t))));
+       *d_ptr = data_ptr;
+}
+
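+/*
+ * RDMUX template entry: write successive select values to 'select_addr'
+ * and record each select value together with the word read back from
+ * 'read_addr'.
+ */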
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_mux *mux_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+       r_addr = mux_hdr->read_addr;
+       s_addr = mux_hdr->select_addr;
+       s_stride = mux_hdr->select_value_stride;
+       s_value = mux_hdr->select_value;
+       loop_cnt = mux_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               *data_ptr++ = cpu_to_le32(s_value);
+               *data_ptr++ = cpu_to_le32(r_value);
+               s_value += s_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t addr, r_addr, c_addr, t_r_addr;
+       uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+       uint32_t c_value_w;
+       struct qla82xx_minidump_entry_cache *cache_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+       loop_count = cache_hdr->op_count;
+       r_addr = cache_hdr->read_addr;
+       c_addr = cache_hdr->control_addr;
+       c_value_w = cache_hdr->cache_ctrl.write_value;
+
+       t_r_addr = cache_hdr->tag_reg_addr;
+       t_value = cache_hdr->addr_ctrl.init_tag_value;
+       r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+       for (i = 0; i < loop_count; i++) {
+               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+               qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+               addr = r_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       addr += cache_hdr->read_ctrl.read_addr_stride;
+               }
+               t_value += cache_hdr->addr_ctrl.tag_value_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t s_addr, r_addr;
+       uint32_t r_stride, r_value, r_cnt, qid = 0;
+       uint32_t i, k, loop_cnt;
+       struct qla82xx_minidump_entry_queue *q_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+       s_addr = q_hdr->select_addr;
+       r_cnt = q_hdr->rd_strd.read_addr_cnt;
+       r_stride = q_hdr->rd_strd.read_addr_stride;
+       loop_cnt = q_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+               r_addr = q_hdr->read_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       r_addr += r_stride;
+               }
+               qid += q_hdr->q_strd.queue_id_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW           0x42110030
+#define MD_DIRECT_ROM_READ_BASE                0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_value;
+       uint32_t i, loop_cnt;
+       struct qla82xx_minidump_entry_rdrom *rom_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+       r_addr = rom_hdr->read_addr;
+       loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: flash_addr: 0x%x, loop_cnt: 0x%x\n",
+                          __func__, r_addr, loop_cnt));
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+                                  (r_addr & 0xFFFF0000), 1);
+               r_value = qla4_8xxx_md_rw_32(ha,
+                                            MD_DIRECT_ROM_READ_BASE +
+                                            (r_addr & 0x0000FFFF), 0, 0);
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += sizeof(uint32_t);
+       }
+       *d_ptr = data_ptr;
+}
+
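The ROM reads above split each flash address into a 64 KB window (upper 16 bits, programmed via MD_DIRECT_ROM_WINDOW) and an offset inside that window (lower 16 bits, added to MD_DIRECT_ROM_READ_BASE). A small sketch of the split, for illustration only (helper name is hypothetical):

/* Not part of the patch: shows the window/offset split used by
 * qla4_8xxx_minidump_process_rdrom() above. */
static void md_rom_split(uint32_t flash_addr, uint32_t *window, uint32_t *offset)
{
        *window = flash_addr & 0xFFFF0000;      /* -> MD_DIRECT_ROM_WINDOW */
        *offset = flash_addr & 0x0000FFFF;      /* + MD_DIRECT_ROM_READ_BASE */
}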
+#define MD_MIU_TEST_AGT_CTRL           0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO                0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_value, r_data;
+       uint32_t i, j, loop_cnt;
+       struct qla82xx_minidump_entry_rdmem *m_hdr;
+       unsigned long flags;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+       r_addr = m_hdr->read_addr;
+       loop_cnt = m_hdr->read_data_size/16;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+                         __func__, r_addr, m_hdr->read_data_size));
+
+       if (r_addr & 0xf) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+                                 __func__, r_addr));
+               return QLA_ERROR;
+       }
+
+       if (m_hdr->read_data_size % 16) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+                                 __func__, m_hdr->read_data_size));
+               return QLA_ERROR;
+       }
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+                         __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+       write_lock_irqsave(&ha->hw_lock, flags);
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+               r_value = 0;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+               r_value = MIU_TA_CTL_ENABLE;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+               r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+               for (j = 0; j < MAX_CTL_CHECK; j++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+                                                    0, 0);
+                       if ((r_value & MIU_TA_CTL_BUSY) == 0)
+                               break;
+               }
+
+               if (j >= MAX_CTL_CHECK) {
+                       printk_ratelimited(KERN_ERR
+                                          "%s: failed to read through agent\n",
+                                           __func__);
+                       write_unlock_irqrestore(&ha->hw_lock, flags);
+                       return QLA_SUCCESS;
+               }
+
+               for (j = 0; j < 4; j++) {
+                       r_data = qla4_8xxx_md_rw_32(ha,
+                                                   MD_MIU_TEST_AGT_RDDATA[j],
+                                                   0, 0);
+                       *data_ptr++ = cpu_to_le32(r_data);
+               }
+
+               r_addr += 16;
+       }
+       write_unlock_irqrestore(&ha->hw_lock, flags);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+                         __func__, (loop_cnt * 16)));
+
+       *d_ptr = data_ptr;
+       return QLA_SUCCESS;
+}
+
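Each pass of the test-agent loop above returns four 32-bit words (16 bytes), which is why read_data_size must be 16-byte aligned and loop_cnt is read_data_size/16. A hedged sketch of the resulting output size (helper name is illustrative, not part of the patch):

/* Illustration only: output dwords produced by
 * qla4_8xxx_minidump_process_rdmem() for a given read_data_size. */
static uint32_t md_rdmem_output_dwords(uint32_t read_data_size)
{
        /* read_data_size / 16 iterations, 4 dwords per iteration */
        return (read_data_size / 16) * 4;
}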
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               int index)
+{
+       entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+                         ha->host_no, index, entry_hdr->entry_type,
+                         entry_hdr->d_ctrl.entry_capture_mask));
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+       int num_entry_hdr = 0;
+       struct qla82xx_minidump_entry_hdr *entry_hdr;
+       struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+       uint32_t *data_ptr;
+       uint32_t data_collected = 0;
+       int i, rval = QLA_ERROR;
+       uint64_t now;
+       uint32_t timestamp;
+
+       if (!ha->fw_dump) {
+               ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+                          __func__, ha->host_no);
+               return rval;
+       }
+
+       tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+                                               ha->fw_dump_tmplt_hdr;
+       data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+                                               ha->fw_dump_tmplt_size);
+       data_collected += ha->fw_dump_tmplt_size;
+
+       num_entry_hdr = tmplt_hdr->num_of_entries;
+       ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+                  __func__, data_ptr);
+       ql4_printk(KERN_INFO, ha,
+                  "[%s]: no of entry headers in Template: 0x%x\n",
+                  __func__, num_entry_hdr);
+       ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+                  __func__, ha->fw_dump_capture_mask);
+       ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+                  __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+       /* Update current timestamp before taking dump */
+       now = get_jiffies_64();
+       timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+       tmplt_hdr->driver_timestamp = timestamp;
+
+       entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+                                       (((uint8_t *)ha->fw_dump_tmplt_hdr) +
+                                        tmplt_hdr->first_entry_offset);
+
+       /* Walk through the entry headers - validate/perform required action */
+       for (i = 0; i < num_entry_hdr; i++) {
+               if (data_collected >= ha->fw_dump_size) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+                                  data_collected, ha->fw_dump_size);
+                       return rval;
+               }
+
+               if (!(entry_hdr->d_ctrl.entry_capture_mask &
+                     ha->fw_dump_capture_mask)) {
+                       entry_hdr->d_ctrl.driver_flags |=
+                                               QLA82XX_DBG_SKIPPED_FLAG;
+                       goto skip_nxt_entry;
+               }
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Data collected: [0x%x], Dump size left:[0x%x]\n",
+                                 data_collected,
+                                 (ha->fw_dump_size - data_collected)));
+
+               /* Decode the entry type and take required action to capture
+                * debug data
+                */
+               switch (entry_hdr->entry_type) {
+               case QLA82XX_RDEND:
+                       ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               case QLA82XX_CNTRL:
+                       rval = qla4_8xxx_minidump_process_control(ha,
+                                                                 entry_hdr);
+                       if (rval != QLA_SUCCESS) {
+                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               goto md_failed;
+                       }
+                       break;
+               case QLA82XX_RDCRB:
+                       qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+                                                        &data_ptr);
+                       break;
+               case QLA82XX_RDMEM:
+                       rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+                                                               &data_ptr);
+                       if (rval != QLA_SUCCESS) {
+                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               goto md_failed;
+                       }
+                       break;
+               case QLA82XX_BOARD:
+               case QLA82XX_RDROM:
+                       qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
+                                                        &data_ptr);
+                       break;
+               case QLA82XX_L2DTG:
+               case QLA82XX_L2ITG:
+               case QLA82XX_L2DAT:
+               case QLA82XX_L2INS:
+                       rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+                                                               &data_ptr);
+                       if (rval != QLA_SUCCESS) {
+                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               goto md_failed;
+                       }
+                       break;
+               case QLA82XX_L1DAT:
+               case QLA82XX_L1INS:
+                       qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+                                                          &data_ptr);
+                       break;
+               case QLA82XX_RDOCM:
+                       qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+                                                        &data_ptr);
+                       break;
+               case QLA82XX_RDMUX:
+                       qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+                                                        &data_ptr);
+                       break;
+               case QLA82XX_QUEUE:
+                       qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+                                                        &data_ptr);
+                       break;
+               case QLA82XX_RDNOP:
+               default:
+                       ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               }
+
+               data_collected = (uint8_t *)data_ptr -
+                                ((uint8_t *)((uint8_t *)ha->fw_dump +
+                                               ha->fw_dump_tmplt_size));
+skip_nxt_entry:
+               /*  next entry in the template */
+               entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+                               (((uint8_t *)entry_hdr) +
+                                entry_hdr->entry_size);
+       }
+
+       if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+               ql4_printk(KERN_INFO, ha,
+                          "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+                          data_collected, ha->fw_dump_size);
+               goto md_failed;
+       }
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+                         __func__, i));
+md_failed:
+       return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ * @code: uevent code to send (e.g. QL4_UEVENT_CODE_FW_DUMP)
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+       char event_string[40];
+       char *envp[] = { event_string, NULL };
+
+       switch (code) {
+       case QL4_UEVENT_CODE_FW_DUMP:
+               snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+                        ha->host_no);
+               break;
+       default:
+               /* do nothing */
+               break;
+       }
+
+       kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
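Usage note (not part of the patch): the FW_DUMP=<host_no> key is delivered with a KOBJ_CHANGE uevent on the adapter's PCI device, so it can be observed from userspace with a tool such as udevadm monitor --environment while the dump is being collected.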
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
 
        qla4_8xxx_idc_unlock(ha);
+       if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+           !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+               if (!qla4_8xxx_collect_md_data(ha)) {
+                       qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+               } else {
+                       ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+                       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+               }
+       }
        rval = qla4_8xxx_try_start_fw(ha);
        qla4_8xxx_idc_lock(ha);
 
@@ -1686,6 +2360,7 @@ static void
 qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
 {
        uint32_t dev_state, drv_state, drv_active;
+       uint32_t active_mask = 0xFFFFFFFF;
        unsigned long reset_timeout;
 
        ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                qla4_8xxx_idc_lock(ha);
        }
 
-       qla4_8xxx_set_rst_ready(ha);
+       if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s(%ld): reset acknowledged\n",
+                                 __func__, ha->host_no));
+               qla4_8xxx_set_rst_ready(ha);
+       } else {
+               active_mask = (~(1 << (ha->func_num * 4)));
+       }
 
        /* wait for 10 seconds for reset ack from all functions */
        reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
                __func__, ha->host_no, drv_state, drv_active);
 
-       while (drv_state != drv_active) {
+       while (drv_state != (drv_active & active_mask)) {
                if (time_after_eq(jiffies, reset_timeout)) {
-                       printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+                                  DRIVER_NAME, drv_state, drv_active);
                        break;
                }
 
+               /*
+                * When reset_owner times out, check which functions
+                * acked/did not ack
+                */
+               if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+                                  __func__, ha->host_no, drv_state,
+                                  drv_active);
+               }
                qla4_8xxx_idc_unlock(ha);
                msleep(1000);
                qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        }
 
+       /* Clear RESET OWNER as we are not going to use it any further */
+       clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-       ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
-               dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+       ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+                  dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
 
        /* Force to DEV_COLD unless someone else is starting a reset */
        if (dev_state != QLA82XX_DEV_INITIALIZING) {
                ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
                qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+               qla4_8xxx_set_rst_ready(ha);
        }
 }
 
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        }
 
        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-       ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
-               dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+                         dev_state, dev_state < MAX_STATES ?
+                         qdev_state[dev_state] : "Unknown"));
 
        /* wait for 30 seconds for device to go ready */
        dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        while (1) {
 
                if (time_after_eq(jiffies, dev_init_timeout)) {
-                       ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+                       ql4_printk(KERN_WARNING, ha,
+                                  "%s: Device Init Failed 0x%x = %s\n",
+                                  DRIVER_NAME,
+                                  dev_state, dev_state < MAX_STATES ?
+                                  qdev_state[dev_state] : "Unknown");
                        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                                QLA82XX_DEV_FAILED);
                }
 
                dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               ql4_printk(KERN_INFO, ha,
-                   "2:Device state is 0x%x = %s\n", dev_state,
-                   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+               ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+                          dev_state, dev_state < MAX_STATES ?
+                          qdev_state[dev_state] : "Unknown");
 
                /* NOTE: Make sure idc unlocked upon exit of switch statement */
                switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
                ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
                qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                    QLA82XX_DEV_NEED_RESET);
+               set_bit(AF_82XX_RST_OWNER, &ha->flags);
        } else
                ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
 
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
        qla4_8xxx_clear_rst_ready(ha);
        qla4_8xxx_idc_unlock(ha);
 
-       if (rval == QLA_SUCCESS)
+       if (rval == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
                clear_bit(AF_FW_RECOVERY, &ha->flags);
+       }
 
        return rval;
 }
index dc7500e47b8b8e3a564cae69f4f158ec0e6a8a07..30258479f100370400590278f725d6da470b49a1 100644 (file)
@@ -792,4 +792,196 @@ struct crb_addr_pair {
 #define MIU_TEST_AGT_WRDATA_UPPER_LO   (0x0b0)
 #define        MIU_TEST_AGT_WRDATA_UPPER_HI    (0x0b4)
 
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP  0
+#define QLA82XX_RDCRB  1
+#define QLA82XX_RDMUX  2
+#define QLA82XX_QUEUE  3
+#define QLA82XX_BOARD  4
+#define QLA82XX_RDOCM  6
+#define QLA82XX_PREGS  7
+#define QLA82XX_L1DTG  8
+#define QLA82XX_L1ITG  9
+#define QLA82XX_L1DAT  11
+#define QLA82XX_L1INS  12
+#define QLA82XX_L2DTG  21
+#define QLA82XX_L2ITG  22
+#define QLA82XX_L2DAT  23
+#define QLA82XX_L2INS  24
+#define QLA82XX_RDROM  71
+#define QLA82XX_RDMEM  72
+#define QLA82XX_CNTRL  98
+#define QLA82XX_RDEND  255
+
+/* Opcodes for control entries.
+ * These flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR          0x01
+#define QLA82XX_DBG_OPCODE_RW          0x02
+#define QLA82XX_DBG_OPCODE_AND         0x04
+#define QLA82XX_DBG_OPCODE_OR          0x08
+#define QLA82XX_DBG_OPCODE_POLL                0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE     0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE     0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE     0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG       0x80 /* driver skipped this entry  */
+#define QLA82XX_DBG_SIZE_ERR_FLAG      0x40 /* Entry vs Capture size
+                                             * mismatch */
+
+/* driver_code is for the driver to record information about the entry;
+ * it is currently unused.
+ */
+struct qla82xx_minidump_entry_hdr {
+       uint32_t entry_type;
+       uint32_t entry_size;
+       uint32_t entry_capture_size;
+       struct {
+               uint8_t entry_capture_mask;
+               uint8_t entry_code;
+               uint8_t driver_code;
+               uint8_t driver_flags;
+       } d_ctrl;
+};
+
+/*  Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t addr;
+       struct {
+               uint8_t addr_stride;
+               uint8_t state_index_a;
+               uint16_t poll_timeout;
+       } crb_strd;
+       uint32_t data_size;
+       uint32_t op_count;
+
+       struct {
+               uint8_t opcode;
+               uint8_t state_index_v;
+               uint8_t shl;
+               uint8_t shr;
+       } crb_ctrl;
+
+       uint32_t value_1;
+       uint32_t value_2;
+       uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t tag_reg_addr;
+       struct {
+               uint16_t tag_value_stride;
+               uint16_t init_tag_value;
+       } addr_ctrl;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t control_addr;
+       struct {
+               uint16_t write_value;
+               uint8_t poll_mask;
+               uint8_t poll_wait;
+       } cache_ctrl;
+       uint32_t read_addr;
+       struct {
+               uint8_t read_addr_stride;
+               uint8_t read_addr_cnt;
+               uint16_t rsvd_1;
+       } read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd_0;
+       uint32_t rsvd_1;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t rsvd_2;
+       uint32_t rsvd_3;
+       uint32_t read_addr;
+       uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd[6];
+       uint32_t read_addr;
+       uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd[6];
+       uint32_t read_addr;
+       uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t select_addr;
+       uint32_t rsvd_0;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t select_value;
+       uint32_t select_value_stride;
+       uint32_t read_addr;
+       uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t select_addr;
+       struct {
+               uint16_t queue_id_stride;
+               uint16_t rsvd_0;
+       } q_strd;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t rsvd_1;
+       uint32_t rsvd_2;
+       uint32_t read_addr;
+       struct {
+               uint8_t read_addr_stride;
+               uint8_t read_addr_cnt;
+               uint16_t rsvd_3;
+       } rd_strd;
+};
+
+#define QLA82XX_MINIDUMP_OCM0_SIZE             (256 * 1024)
+#define QLA82XX_MINIDUMP_L1C_SIZE              (256 * 1024)
+#define QLA82XX_MINIDUMP_L2C_SIZE              1572864
+#define QLA82XX_MINIDUMP_COMMON_STR_SIZE       0
+#define QLA82XX_MINIDUMP_FCOE_STR_SIZE         0
+#define QLA82XX_MINIDUMP_MEM_SIZE              0
+#define QLA82XX_MAX_ENTRY_HDR                  4
+
+struct qla82xx_minidump {
+       uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
+       uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
+       uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
+       uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
+       uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
+       uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE       0x129
+#define RQST_TMPLT_SIZE                                0x0
+#define RQST_TMPLT                             0x1
+#define MD_DIRECT_ROM_WINDOW                   0x42110030
+#define MD_DIRECT_ROM_READ_BASE                        0x42150000
+#define MD_MIU_TEST_AGT_CTRL                   0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO                        0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI                        0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+                               0x410000AC, 0x410000B8, 0x410000BC };
 #endif
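The entry header layout above is what the collection loop in qla4_8xxx_collect_md_data() walks. A minimal sketch of such a walk, assuming the template header fields the driver uses (first_entry_offset, num_of_entries) and a hypothetical helper name:

/* Hedged sketch: walks minidump entry headers using the layout defined
 * above; the parameter names mirror the driver's template header fields. */
static void md_walk_entries(void *tmplt, uint32_t first_entry_offset,
                            int num_of_entries, uint8_t capture_mask)
{
        struct qla82xx_minidump_entry_hdr *hdr;
        int i;

        hdr = (struct qla82xx_minidump_entry_hdr *)
                        ((uint8_t *)tmplt + first_entry_offset);
        for (i = 0; i < num_of_entries; i++) {
                if (hdr->d_ctrl.entry_capture_mask & capture_mask) {
                        /* dispatch on hdr->entry_type, e.g. QLA82XX_RDCRB */
                }
                hdr = (struct qla82xx_minidump_entry_hdr *)
                                ((uint8_t *)hdr + hdr->entry_size);
        }
}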
index ee47820c30a6591824cfa42abff426fb09114e7b..cd15678f9ada740d4448f7b04f96ea89b1a956c5 100644 (file)
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
                 " Maximum queue depth to report for target devices.\n"
                 "\t\t  Default: 32.");
 
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+                " Enable or disable dynamic tracking and adjustment of\n"
+                "\t\t scsi device queue depth.\n"
+                "\t\t  0 - Disable.\n"
+                "\t\t  1 - Enable. (Default)");
+
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                " Target Session Recovery Timeout.\n"
                "\t\t  Default: 120 sec.");
 
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+                " Set the Minidump driver capture mask level.\n"
+                "\t\t  Default is 0x1F.\n"
+                "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+                " Set to enable minidump.\n"
+                "\t\t  0 - disable minidump\n"
+                "\t\t  1 - enable minidump (Default)");
+
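ql4xenablemd gates minidump collection and ql4xmdcapmask selects which template entries are captured. A minimal sketch, assuming the documented levels are the only valid masks (helper name is hypothetical, not part of the patch):

/* Illustration only: checks a proposed ql4xmdcapmask value against the
 * levels listed in the parameter description above. */
static bool ql4x_md_capmask_valid(int mask)
{
        switch (mask) {
        case 0x3: case 0x7: case 0xF:
        case 0x1F: case 0x3F: case 0x7F:
                return true;
        default:
                return false;
        }
}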
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
  * SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
 static umode_t ql4_attr_is_visible(int param_type, int param);
 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                     int reason);
 
 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
     QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
        .slave_configure        = qla4xxx_slave_configure,
        .slave_alloc            = qla4xxx_slave_alloc,
        .slave_destroy          = qla4xxx_slave_destroy,
+       .change_queue_depth     = qla4xxx_change_queue_depth,
 
        .this_id                = -1,
        .cmd_per_lun            = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       unsigned long flags;
+       unsigned long flags, wtime;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_entry_dma;
+       uint32_t ddb_state;
+       int ret;
 
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto destroy_session;
+       }
+
+       wtime = jiffies + (HZ * LOGOUT_TOV);
+       do {
+               ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+                                             fw_ddb_entry, fw_ddb_entry_dma,
+                                             NULL, NULL, &ddb_state, NULL,
+                                             NULL, NULL);
+               if (ret == QLA_ERROR)
+                       goto destroy_session;
+
+               if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+                   (ddb_state == DDB_DS_SESSION_FAILED))
+                       goto destroy_session;
+
+               schedule_timeout_uninterruptible(HZ);
+       } while ((time_after(wtime, jiffies)));
+
+destroy_session:
        qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qla4xxx_free_ddb(ha, ddb_entry);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        iscsi_session_teardown(cls_sess);
+
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
                                  ha->queues_dma);
 
+       if (ha->fw_dump)
+               vfree(ha->fw_dump);
+
        ha->queues_len = 0;
        ha->queues = NULL;
        ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
        ha->response_dma = 0;
        ha->shadow_regs = NULL;
        ha->shadow_regs_dma = 0;
+       ha->fw_dump = NULL;
+       ha->fw_dump_size = 0;
 
        /* Free srb pool. */
        if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 
        set_bit(AF_INIT_DONE, &ha->flags);
 
+       qla4_8xxx_alloc_sysfs_attr(ha);
+
        printk(KERN_INFO
               " QLogic iSCSI HBA Driver version: %s\n"
               "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
        qla4xxx_destroy_fw_ddb_session(ha);
+       qla4_8xxx_free_sysfs_attr(ha);
 
        scsi_remove_host(ha->host);
 
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
        scsi_deactivate_tcq(sdev, 1);
 }
 
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                     int reason)
+{
+       if (!ql4xqfulltracking)
+               return -EOPNOTSUPP;
+
+       return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
 /**
  * qla4xxx_del_from_active_array - returns an active srb
  * @ha: Pointer to host adapter structure.
index 97b30c108e365f6d22e93fc4ca826b6eb46c5e34..cc1cc3518b87cfad495304d247d8c5bf2e0332c2 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
index 61c82a345f8275ea0ece24bcf0bcc447becf97b3..bbbc9c918d4cc515f15d3b7edce9fa2359f3594a 100644 (file)
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
 LIST_HEAD(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
 
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
index 62ddfd31d4ce3539b129ee3f7763d9422ab32f5e..6dfb9785d34581eb06395ef1d4b372b243de5d87 100644 (file)
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
-       struct scsi_target *starget;
 
        if (!sdev)
                return 0;
 
        shost = sdev->host;
-       starget = scsi_target(sdev);
 
-       if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
-           scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+       /*
+        * Ignore host/starget busy state.
+        * Since the block layer does not have a concept of fairness across
+        * multiple queues, congestion of the host/starget needs to be handled
+        * in the SCSI layer.
+        */
+       if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
                return 1;
 
        return 0;
index f661a41fa4c6fef7e054ed15dd1b73596bc99691..d4201ded3b2203c0bd9cc17d8a26b528f5ccfc2c 100644 (file)
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                drv = dev->driver;
-               if (drv && drv->suspend)
+               if (drv && drv->suspend) {
                        err = drv->suspend(dev, msg);
+                       if (err)
+                               scsi_device_resume(to_scsi_device(dev));
+               }
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
index 01b03744f1f99ced5879d0f9e9f3a7d1d0677c11..2e5fe584aad32d2130ad59945c922eaa238fb0be 100644 (file)
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
 
        do {
                if (list_empty(&scanning_hosts))
-                       return 0;
+                       goto out;
                /* If we can't get memory immediately, that's OK.  Just
                 * sleep a little.  Even if we never get memory, the async
                 * scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
        }
  done:
        spin_unlock(&async_scan_lock);
-
        kfree(data);
+
+ out:
+       async_synchronize_full_domain(&scsi_sd_probe_domain);
+
        return 0;
 }
 
index 579760420d538d28f28d2f1e7246fc3d990e7fbf..a9617ad05f33175fd1de21bfd31fdd0cfd279d1a 100644 (file)
@@ -4130,45 +4130,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
 static void
 fc_bsg_remove(struct request_queue *q)
 {
-       struct request *req; /* block request */
-       int counts; /* totals for request_list count and starved */
-
        if (q) {
-               /* Stop taking in new requests */
-               spin_lock_irq(q->queue_lock);
-               blk_stop_queue(q);
-
-               /* drain all requests in the queue */
-               while (1) {
-                       /* need the lock to fetch a request
-                        * this may fetch the same reqeust as the previous pass
-                        */
-                       req = blk_fetch_request(q);
-                       /* save requests in use and starved */
-                       counts = q->rq.count[0] + q->rq.count[1] +
-                               q->rq.starved[0] + q->rq.starved[1];
-                       spin_unlock_irq(q->queue_lock);
-                       /* any requests still outstanding? */
-                       if (counts == 0)
-                               break;
-
-                       /* This may be the same req as the previous iteration,
-                        * always send the blk_end_request_all after a prefetch.
-                        * It is not okay to not end the request because the
-                        * prefetch started the request.
-                        */
-                       if (req) {
-                               /* return -ENXIO to indicate that this queue is
-                                * going away
-                                */
-                               req->errors = -ENXIO;
-                               blk_end_request_all(req, -ENXIO);
-                       }
-
-                       msleep(200); /* allow bsg to possibly finish */
-                       spin_lock_irq(q->queue_lock);
-               }
-
                bsg_unregister_queue(q);
                blk_cleanup_queue(q);
        }
index 1cf640e575da4567fb22516bc5eff3cf3ff0efe9..c737a16b0a1dae1e07557ad9e6a8378f6d137cb7 100644 (file)
@@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc,
        struct iscsi_cls_host *ihost = shost->shost_data;
 
        if (ihost->bsg_q) {
-               bsg_remove_queue(ihost->bsg_q);
+               bsg_unregister_queue(ihost->bsg_q);
                blk_cleanup_queue(ihost->bsg_q);
        }
        return 0;
index 74708fcaf82fe900c3a77689ccad11c91b48b173..0727345388766d22cfbbc9441c92e291a5f8c841 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
 
 static int __init wait_scan_init(void)
 {
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
         * and might not yet have reached the scsi async scanning
         */
        wait_for_device_probe();
-       /*
-        * and then we wait for the actual asynchronous scsi scan
-        * to finish.
-        */
-       scsi_complete_async_scans();
        return 0;
 }
 
index 6f0a4c612b3bf0f60a952d080033f98f1b2b4cff..6f72b80121a02afdc9dda6fc82530af4e16017d9 100644 (file)
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
 {
        if (sdp->host->max_cmd_len < 16)
                return 0;
+       if (sdp->try_rc_10_first)
+               return 0;
        if (sdp->scsi_level > SCSI_SPC_2)
                return 1;
        if (scsi_device_protection(sdp))
index 4e010b727818cd341968bdb378a1cb3f6e544cdf..6a4fd00117ca66667173a27728b7055d2e9a5f76 100644 (file)
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        err = pci_request_regions(pdev, UFSHCD);
        if (err < 0) {
                dev_err(&pdev->dev, "request regions failed\n");
-               goto out_disable;
+               goto out_host_put;
        }
 
        hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
        iounmap(hba->mmio_base);
 out_release_regions:
        pci_release_regions(pdev);
-out_disable:
+out_host_put:
        scsi_host_put(host);
+out_disable:
        pci_clear_master(pdev);
        pci_disable_device(pdev);
 out_error:
index 46ef5fe51db5476b797bffe719fed5bbfccca395..0c73dd4f43a0d0a7e727e1f92a0929095becd392 100644 (file)
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+               cs = kzalloc(sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                cs = spi->controller_state;
                list_del(&cs->node);
 
+               kfree(cs);
        }
 
        if (spi->chip_select < spi->master->num_chipselect) {
index 1c3d6386ea36a918d71a1dbed9c3c07cbafaa18a..aeac1caba3f9918b2f574afc4189bd05743f403c 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/usb.h>
 #include <linux/errno.h>
+#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
 }
 EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
 
+#if IS_ENABLED(CONFIG_USB)
+
 static int comedi_old_usb_auto_config(struct usb_interface *intf,
                                      struct comedi_driver *driver)
 {
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
        comedi_driver_unregister(comedi_driver);
 }
 EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
index 292af0f7f4511668b03e7cc2662f85093549612f..51665132c61b8057e6158d63ee5a595504308225 100644 (file)
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
 
 void netlink_exit(struct sock *sock)
 {
-       sock_release(sock->sk_socket);
+       netlink_kernel_release(sock);
 }
 
 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
index 0338c7cd0a8b4a7d5f1ead8a4c766c981c59cf07..f03fbd3bb4547618ced53e342ee9d546052fed9f 100644 (file)
@@ -29,8 +29,6 @@ Then fill in the following:
        * info->driver_module:
                Set to THIS_MODULE. Used to ensure correct ownership
                of various resources allocate by the core.
-       * info->num_interrupt_lines:
-               Number of event triggering hardware lines the device has.
        * info->event_attrs:
                Attributes used to enable / disable hardware events.
        * info->attrs:
index 2490dd25093b43cdc889de09d0bf264e81369ed8..8f1b3af02f299b869f7aeffa629ccdfc3cfb1c19 100644 (file)
@@ -13,6 +13,7 @@ config AD7291
 config AD7298
        tristate "Analog Devices AD7298 ADC driver"
        depends on SPI
+       select IIO_KFIFO_BUF if IIO_BUFFER
        help
          Say yes here to build support for Analog Devices AD7298
          8 Channel ADC with temperature sensor.
index 10ab6dc823b911453275dbce860b68ee4ef957c0..a13afff2dfe6d219264096bcfc1f51d8f8b7e009 100644 (file)
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
                .indexed = 1,                                   \
                .channel = num,                                 \
                .address = num,                                 \
-               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,    \
+               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |   \
+                               IIO_CHAN_INFO_SCALE_SHARED_BIT, \
                .scan_index = num,                              \
                .scan_type = IIO_ST('s', 16, 16, 0),            \
        }
index 3295ea63f3eb5d53bcafabc39a372846ba0610b3..97ef67036e3f2befa84f148b45b471df36d22650 100644 (file)
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
 
 static struct lirc_serial hardware[] = {
        [LIRC_HOMEBREW] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO_REMOTE] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_ANIMAX] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IGOR] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
         * See also http://www.nslu2-linux.org for this device
         */
        [LIRC_NSLU2] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
                .signal_pin        = UART_MSR_CTS,
                .signal_pin_change = UART_MSR_DCTS,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
index 11acd4c35ed2c0d043cb8b45a12e5a36ca87460e..8c6ed3b0c6f6c9e63c15d0c42ca1985878a2c270 100644 (file)
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
         */
        ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
        if (ret) {
-               dev_err(dev->dev, "could not map (paddr)!\n");
+               dev_err(dev->dev,
+                       "could not map (paddr)!  Skipping framebuffer alloc\n");
                ret = -ENOMEM;
                goto fail;
        }
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
 
        fbi = helper->fbdev;
 
-       unregister_framebuffer(fbi);
-       framebuffer_release(fbi);
+       /* only cleanup framebuffer if it is present */
+       if (fbi) {
+               unregister_framebuffer(fbi);
+               framebuffer_release(fbi);
+       }
 
        drm_fb_helper_fini(helper);
 
index 4e7ef0e6b79c0f13bbb0c05d360a368ea9cb3cd9..d46764b5aaba07513dbf399be27a032b6eb1b702 100644 (file)
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
index 9bd18e2d05130dcbab81c55cd4853c68521870bf..69f616c6964ecd752c23f2f5918d03abee2cd7c9 100644 (file)
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        /* - */
        {USB_DEVICE(0x20F4, 0x646B)},
        {USB_DEVICE(0x083A, 0xC512)},
+       {USB_DEVICE(0x25D4, 0x4CA1)},
+       {USB_DEVICE(0x25D4, 0x4CAB)},
 
 /* RTL8191SU */
        /* Realtek */
index 2734dacacbaf3d19e29a4399f4fb4e50dc6d86d4..784c796b9848a12b6167150d3a94d20a086e68b8 100644 (file)
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
  * Swizzling increases objects per swaptype, increasing tmem concurrency
  * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
  * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
  */
 #define SWIZ_BITS              8
 #define SWIZ_MASK              ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
index 37c609898f84b53ffdd5b959bd9b3fa663739247..7e6136e2ce81dc256babc191fab05953aabf6bfc 100644 (file)
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
 {
        struct sbp_tport *tport = agent->tport;
        struct sbp_tpg *tpg = tport->tpg;
-       int login_id;
+       int id;
        struct sbp_login_descriptor *login;
 
-       login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+       id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 
-       login = sbp_login_find_by_id(tpg, login_id);
+       login = sbp_login_find_by_id(tpg, id);
        if (!login) {
-               pr_warn("cannot find login: %d\n", login_id);
+               pr_warn("cannot find login: %d\n", id);
 
                req->status.status = cpu_to_be32(
                        STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
index e624b836469cdc48af94a51be106df42a85ab63f..91799973081a3d907cd260792df3f573d1dbec82 100644 (file)
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 out:
        transport_kunmap_data_sg(cmd);
-       target_complete_cmd(cmd, GOOD);
-       return 0;
+       if (!rc)
+               target_complete_cmd(cmd, GOOD);
+       return rc;
 }
 
 static inline int core_alua_state_nonoptimized(
index 9888693a18fe0c7a024ef150483097a12e19fcc4..664f6e775d0e45e1f4baac3f1c68f724f8185fc4 100644 (file)
@@ -1095,7 +1095,7 @@ int target_emulate_write_same(struct se_cmd *cmd)
        if (num_blocks != 0)
                range = num_blocks;
        else
-               range = (dev->transport->get_blocks(dev) - lba);
+               range = (dev->transport->get_blocks(dev) - lba) + 1;
 
        pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
                 (unsigned long long)lba, (unsigned long long)range);
index 686dba189f8eba7fac5bd9ab1d13e6833ec1e4b4..9f99d0404908fd04fe4a68e30c00b3e015f48879 100644 (file)
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
                ret = PTR_ERR(dev_p);
                goto fail;
        }
-
-       /* O_DIRECT too? */
-       flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
        /*
-        * If fd_buffered_io=1 has not been set explicitly (the default),
-        * use O_SYNC to force FILEIO writes to disk.
+        * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+        * of pure timestamp updates.
         */
-       if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
-               flags |= O_SYNC;
+       flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
 
        file = filp_open(dev_p, flags, 0600);
        if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
        }
 }
 
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct fd_dev *fd_dev = dev->dev_ptr;
-       loff_t start = cmd->t_task_lba *
-               dev->se_sub_dev->se_dev_attrib.block_size;
-       loff_t end = start + cmd->data_length;
-       int ret;
-
-       pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-               cmd->t_task_lba, cmd->data_length);
-
-       ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
-       if (ret != 0)
-               pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, enum dma_data_direction data_direction)
 {
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                ret = fd_do_readv(cmd, sgl, sgl_nents);
        } else {
                ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+               /*
+                * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+                * for SCSI WRITEs with Forced Unit Access (FUA) set.
+                * Allow this to happen independent of WCE=0 setting.
+                */
                if (ret > 0 &&
-                   dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
-                       /*
-                        * We might need to be a bit smarter here
-                        * and return some sense data to let the initiator
-                        * know the FUA WRITE cache sync failed..?
-                        */
-                       fd_emulate_write_fua(cmd);
-               }
+                       struct fd_dev *fd_dev = dev->dev_ptr;
+                       loff_t start = cmd->t_task_lba *
+                               dev->se_sub_dev->se_dev_attrib.block_size;
+                       loff_t end = start + cmd->data_length;
 
+                       vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+               }
        }
 
        if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
 static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
-       {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
 };
 
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
        struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
-       int ret = 0, arg, token;
+       int ret = 0, token;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
                                        " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
-               case Opt_fd_buffered_io:
-                       match_int(args, &arg);
-                       if (arg != 1) {
-                               pr_err("bogus fd_buffered_io=%d value\n", arg);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-
-                       pr_debug("FILEIO: Using buffered I/O"
-                               " operations for struct fd_dev\n");
-
-                       fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
-                       break;
                default:
                        break;
                }
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
        ssize_t bl = 0;
 
        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
-       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
-               fd_dev->fd_dev_name, fd_dev->fd_dev_size,
-               (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
-               "Buffered" : "Synchronous");
+       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
+               fd_dev->fd_dev_name, fd_dev->fd_dev_size);
        return bl;
 }
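
Note: with the fd_buffered_io option removed, the FILEIO backend always opens its backing file with O_DSYNC, so ordinary writes reach stable storage without also syncing timestamp-only metadata, and FUA writes add an explicit range sync. A minimal userspace sketch of that combination, assuming a local file named backing.img (not part of the patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* O_DSYNC: each write is durable for data (and the metadata needed
         * to read it back) before the call returns. */
        int fd = open("backing.img",
                      O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        char buf[512] = { 0 };
        if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
                perror("pwrite");

        /* FUA-style write: an explicit flush, the userspace analogue of the
         * vfs_fsync_range() call in the kernel path above. */
        if (fdatasync(fd))
                perror("fdatasync");

        close(fd);
        return 0;
}
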
 
index fbd59ef7d8be4e7fec85229634cfb8244ce84b40..70ce7fd7111dd81da3cd1b4d89f5eff822889c8a 100644 (file)
@@ -14,7 +14,6 @@
 
 #define FBDF_HAS_PATH          0x01
 #define FBDF_HAS_SIZE          0x02
-#define FDBD_USE_BUFFERED_IO   0x04
 
 struct fd_dev {
        u32             fbd_flags;
index 85564998500a5bc1330fefbd0860a5425fda5939..a1bcd927a9e60ed6a80903ff737c661c075b675b 100644 (file)
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (IS_ERR(file) || !file || !file->f_dentry) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
-               return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+               return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
        }
 
        iov[0].iov_base = &buf[0];
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
                cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = EINVAL;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
         */
        if (!cmd->se_sess) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (cmd->data_length < 24) {
index b05fdc0c05d33df0560d03222f332fa40b249864..634d0f31a28cc59f621ea6122a13777c320339d5 100644 (file)
@@ -315,7 +315,7 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
 {
        struct se_session *se_sess = container_of(kref,
                        struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
 
 void target_put_session(struct se_session *se_sess)
 {
+       struct se_portal_group *tpg = se_sess->se_tpg;
+
+       if (tpg->se_tpg_tfo->put_session != NULL) {
+               tpg->se_tpg_tfo->put_session(se_sess);
+               return;
+       }
        kref_put(&se_sess->sess_kref, target_release_session);
 }
 EXPORT_SYMBOL(target_put_session);
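
Note: target_put_session() now lets a fabric driver that manages session lifetime supply an optional put_session() callback, and only falls back to the generic kref_put() when the hook is absent. A small standalone sketch of that optional-callback dispatch, with made-up structure names:

#include <stdio.h>
#include <stdlib.h>

struct session;

struct fabric_ops {
        /* Optional hook: NULL means "use the generic refcount put". */
        void (*put_session)(struct session *sess);
};

struct session {
        const struct fabric_ops *ops;
        int refcount;                   /* stand-in for the kernel kref */
};

static void session_release(struct session *sess)
{
        printf("generic release of %p\n", (void *)sess);
        free(sess);
}

static void put_session(struct session *sess)
{
        if (sess->ops && sess->ops->put_session) {
                sess->ops->put_session(sess);   /* fabric owns the lifetime */
                return;
        }
        if (--sess->refcount == 0)
                session_release(sess);
}

int main(void)
{
        struct session *s = calloc(1, sizeof(*s));

        if (!s)
                return 1;
        s->refcount = 1;
        s->ops = NULL;                  /* no fabric hook: generic path runs */
        put_session(s);
        return 0;
}
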
index f03fb9730f5bb89a0cd003817544693e3600e383..5b65f33939a84ab6eef4a12cc0fe84b61d64e96c 100644 (file)
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 {
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
 
+       if (cmd->aborted)
+               return ~0;
        return fc_seq_exch(cmd->seq)->rxid;
 }
 
index cb99da920068986d2c2153f6af507781561074d5..87901fa74dd7e1acb136cbdf0607aa4b8eeeb652 100644 (file)
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
        struct ft_tport *tport;
        int i;
 
-       tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
        if (tport && tport->tpg)
                return tport;
 
index 35819e31262472140be9ec2dfa5210a087a43be1..6cc4358f68c12ad2c779c7837207ce875968cf40 100644 (file)
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
        if (!retinfo)
                return -EFAULT;
        memset(&tmp, 0, sizeof(tmp));
-       tty_lock(tty);
+       tty_lock();
        tmp.line = tty->index;
        tmp.port = state->port;
        tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
        tmp.close_delay = state->tport.close_delay;
        tmp.closing_wait = state->tport.closing_wait;
        tmp.custom_divisor = state->custom_divisor;
-       tty_unlock(tty);
+       tty_unlock();
        if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
                return -EFAULT;
        return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
        if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
                return -EFAULT;
 
-       tty_lock(tty);
+       tty_lock();
        change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
                new_serial.custom_divisor != state->custom_divisor;
        if (new_serial.irq || new_serial.port != state->port ||
                        new_serial.xmit_fifo_size != state->xmit_fifo_size) {
-               tty_unlock(tty);
+               tty_unlock();
                return -EINVAL;
        }
   
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
                    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
                    ((new_serial.flags & ~ASYNC_USR_MASK) !=
                     (port->flags & ~ASYNC_USR_MASK))) {
-                       tty_unlock(tty);
+                       tty_unlock();
                        return -EPERM;
                }
                port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
        }
 
        if (new_serial.baud_base < 9600) {
-               tty_unlock(tty);
+               tty_unlock();
                return -EINVAL;
        }
 
@@ -1116,7 +1116,7 @@ check_and_exit:
                }
        } else
                retval = startup(tty, state);
-       tty_unlock(tty);
+       tty_unlock();
        return retval;
 }
 
index 6984e1a2686a50185608dfff24eb223dbe31b0d9..e61cabdd69df36d56ea0c0cd27fcaeadc488f8de 100644 (file)
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
         * If the port is the middle of closing, bail out now
         */
        if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->port.close_wait,
+               wait_event_interruptible_tty(info->port.close_wait,
                                !(info->port.flags & ASYNC_CLOSING));
                return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
        }
index ced26c8ccd573eb8e6757a30681901b7a0ac88eb..0d2ea0c224c35c6012e0d6c1f7fe04ff337a8931 100644 (file)
@@ -401,7 +401,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
        u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
index d3d91dae065cfdceb3c6b4918c27ad6c252caf33..944eaeb8e0cff62b89ea735b0d98e167ddd806d0 100644 (file)
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
        /* already configured */
        if (info->intf != NULL)
                return 0;
-
+       /*
+        * If the toolstack (or the hypervisor) hasn't set these values, the
+        * default value is 0. Even though mfn = 0 and evtchn = 0 are
+        * theoretically correct values, in practice they never are and they
+        * mean that a legacy toolstack hasn't initialized the pv console correctly.
+        */
        r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
-       if (r < 0) {
-               kfree(info);
-               return -ENODEV;
-       }
+       if (r < 0 || v == 0)
+               goto err;
        info->evtchn = v;
-       hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
-       if (r < 0) {
-               kfree(info);
-               return -ENODEV;
-       }
+       v = 0;
+       r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+       if (r < 0 || v == 0)
+               goto err;
        mfn = v;
        info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
-       if (info->intf == NULL) {
-               kfree(info);
-               return -ENODEV;
-       }
+       if (info->intf == NULL)
+               goto err;
        info->vtermno = HVC_COOKIE;
 
        spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
        spin_unlock(&xencons_lock);
 
        return 0;
+err:
+       kfree(info);
+       return -ENODEV;
 }
 
 static int xen_pv_console_init(void)
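
Note: besides treating an unset (zero) parameter as "no pv console", the hunk above folds three separate kfree()+return paths into one err: label, the usual kernel idiom for single-exit cleanup. A compilable userspace sketch of the same shape, with a stubbed-out getter standing in for hvm_get_parameter():

#include <errno.h>
#include <stdlib.h>

/* Stub standing in for hvm_get_parameter(): fills *v, returns 0 or <0. */
static int get_param(int which, unsigned long *v)
{
        *v = (which == 0) ? 42 : 0;     /* pretend the second value was never set */
        return 0;
}

static void *registered;

static int console_init_sketch(void)
{
        unsigned long v;
        void *info = calloc(1, 64);

        if (!info)
                return -ENOMEM;

        if (get_param(0, &v) < 0 || v == 0)
                goto err;
        if (get_param(1, &v) < 0 || v == 0)     /* 0 means a legacy toolstack */
                goto err;

        registered = info;              /* the real code adds it to a list */
        return 0;
err:
        free(info);                     /* every failure path frees exactly once */
        return -ENODEV;
}

int main(void)
{
        return console_init_sketch() == -ENODEV ? 0 : 1;
}
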
index 656ad93bbc96383b4ed77438e887091a8de63739..5c6c31459a2f6618cb7cf9d83c7100cf1a6d86ea 100644 (file)
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
 
        TRACE_L("read()");
 
-       /* FIXME: should use a private lock */
-       tty_lock(tty);
+       tty_lock();
 
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
                                goto unlock;
                        }
                        /* block until there is a message: */
-                       wait_event_interruptible_tty(tty, pInfo->read_wait,
+                       wait_event_interruptible_tty(pInfo->read_wait,
                                        (pMsg = remove_msg(pInfo, pClient)));
                }
 
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
        }
        ret = -EPERM;
 unlock:
-       tty_unlock(tty);
+       tty_unlock();
        return ret;
 }
 
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        pHeader->locks = 0;
        pHeader->owner = NULL;
 
-       tty_lock(tty);
+       tty_lock();
 
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        add_tx_queue(pInfo, pHeader);
        trigger_transmit(pInfo);
 
-       tty_unlock(tty);
+       tty_unlock();
 
        return 0;
 }
index 65c7c62c7aae9b2d81b936a45f0d67f659507324..5505ffc91da4b5780b33af2cac624ea8f696f5e4 100644 (file)
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
        wake_up_interruptible(&tty->read_wait);
        wake_up_interruptible(&tty->write_wait);
        tty->packet = 0;
-       /* Review - krefs on tty_link ?? */
        if (!tty->link)
                return;
        tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
                        mutex_unlock(&devpts_mutex);
                }
 #endif
-               tty_unlock(tty);
+               tty_unlock();
                tty_vhangup(tty->link);
-               tty_lock(tty);
+               tty_lock();
        }
 }
 
@@ -623,27 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
                return retval;
 
        /* find a device that is not in use. */
-       mutex_lock(&devpts_mutex);
+       tty_lock();
        index = devpts_new_index(inode);
+       tty_unlock();
        if (index < 0) {
                retval = index;
                goto err_file;
        }
 
-       mutex_unlock(&devpts_mutex);
-
        mutex_lock(&tty_mutex);
+       mutex_lock(&devpts_mutex);
        tty = tty_init_dev(ptm_driver, index);
+       mutex_unlock(&devpts_mutex);
+       tty_lock();
+       mutex_unlock(&tty_mutex);
 
        if (IS_ERR(tty)) {
                retval = PTR_ERR(tty);
                goto out;
        }
 
-       /* The tty returned here is locked so we can safely
-          drop the mutex */
-       mutex_unlock(&tty_mutex);
-
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 
        tty_add_file(tty, filp);
@@ -656,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                goto err_release;
 
-       tty_unlock(tty);
+       tty_unlock();
        return 0;
 err_release:
-       tty_unlock(tty);
+       tty_unlock();
        tty_release(inode, filp);
        return retval;
 out:
-       mutex_unlock(&tty_mutex);
        devpts_kill_index(inode, index);
+       tty_unlock();
 err_file:
-        mutex_unlock(&devpts_mutex);
        tty_free_file(filp);
        return retval;
 }
index 47d061b9ad4d24d24ac980e783e9539cebc27f3d..6e1958a325bd8b5512675dd52d399b32bf04803e 100644 (file)
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
 
 /**
  *     serial8250_register_8250_port - register a serial port
- *     @port: serial port template
+ *     @up: serial port template
  *
  *     Configure the serial port specified by the request. If the
  *     port exists and is in use, it is hung up and unregistered
index 4ad721fb84052a61c3452d12532bcc9d21c9a84a..c17923ec6e9505cee3f15c80670f63d9e0644916 100644 (file)
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
 struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
+       /* Two optional pin states - default & sleep */
+       struct pinctrl          *pinctrl;
+       struct pinctrl_state    *pins_default;
+       struct pinctrl_state    *pins_sleep;
        const struct vendor_data *vendor;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
        unsigned int cr;
        int retval;
 
+       /* Optionally enable pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set default pins\n");
+       }
+
        retval = clk_prepare(uap->clk);
        if (retval)
                goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;
+       int retval;
 
        /*
         * disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
         */
        clk_disable(uap->clk);
        clk_unprepare(uap->clk);
+       /* Optionally let pins go into sleep states */
+       if (!IS_ERR(uap->pins_sleep)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set pins to sleep state\n");
+       }
+
 
        if (uap->port.dev->platform_data) {
                struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
        if (!uap)
                return -ENODEV;
 
+       /* Allow pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (ret)
+                       dev_err(uap->port.dev,
+                               "could not set default pins\n");
+       }
+
        ret = clk_prepare(uap->clk);
        if (ret)
                return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct uart_amba_port *uap;
        struct vendor_data *vendor = id->data;
-       struct pinctrl *pinctrl;
        void __iomem *base;
        int i, ret;
 
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
                goto free;
        }
 
-       pinctrl = devm_pinctrl_get_select_default(&dev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
+       uap->pinctrl = devm_pinctrl_get(&dev->dev);
+       if (IS_ERR(uap->pinctrl)) {
+               ret = PTR_ERR(uap->pinctrl);
                goto unmap;
        }
+       uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+                                                PINCTRL_STATE_DEFAULT);
+       if (IS_ERR(uap->pins_default))
+               dev_err(&dev->dev, "could not get default pinstate\n");
+
+       uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+                                              PINCTRL_STATE_SLEEP);
+       if (IS_ERR(uap->pins_sleep))
+               dev_dbg(&dev->dev, "could not get sleep pinstate\n");
 
        uap->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(uap->clk)) {
index 7264d4d2671774651267d7d98e5c4a44aff7f946..80b6b1b1f7257d3b1da80f1b58523e7465bdb36b 100644 (file)
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
         */
        if (tty_hung_up_p(filp) ||
            (info->flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->close_wait,
+               wait_event_interruptible_tty(info->close_wait,
                        !(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
                if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
                printk("block_til_ready blocking: ttyS%d, count = %d\n",
                       info->line, info->count);
 #endif
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
         */
        if (tty_hung_up_p(filp) ||
            (info->flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->close_wait,
+               wait_event_interruptible_tty(info->close_wait,
                        !(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
                return ((info->flags & ASYNC_HUP_NOTIFY) ?
index 34bd345da7751d9ed0ada9af43d98940f478c301..6ae2a58d62f2f82d152c60c5bc570a5dc9742846 100644 (file)
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
        spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 /*
  *     Wait for transmitter & holding register to empty
  */
index 4604153b7954b70288399bb675c373f1e16d2bf3..1bd9163bc1181eb0aaeae952acfaf910911b3720 100644 (file)
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
        return 0;
 }
 
+static void sci_cleanup_single(struct sci_port *port)
+{
+       sci_free_gpios(port);
+
+       clk_put(port->iclk);
+       clk_put(port->fclk);
+
+       pm_runtime_disable(port->port.dev);
+}
+
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
 static void serial_console_putchar(struct uart_port *port, int ch)
 {
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
        cpufreq_unregister_notifier(&port->freq_transition,
                                    CPUFREQ_TRANSITION_NOTIFIER);
 
-       sci_free_gpios(port);
-
        uart_remove_one_port(&sci_uart_driver, &port->port);
 
-       clk_put(port->iclk);
-       clk_put(port->fclk);
+       sci_cleanup_single(port);
 
-       pm_runtime_disable(&dev->dev);
        return 0;
 }
 
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
                           index+1, SCI_NPORTS);
                dev_notice(&dev->dev, "Consider bumping "
                           "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
-               return 0;
+               return -EINVAL;
        }
 
        ret = sci_init_single(dev, sciport, index, p);
        if (ret)
                return ret;
 
-       return uart_add_one_port(&sci_uart_driver, &sciport->port);
+       ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+       if (ret) {
+               sci_cleanup_single(sciport);
+               return ret;
+       }
+
+       return 0;
 }
 
 static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
 
        ret = sci_probe_single(dev, dev->id, p, sp);
        if (ret)
-               goto err_unreg;
+               return ret;
 
        sp->freq_transition.notifier_call = sci_notifier;
 
        ret = cpufreq_register_notifier(&sp->freq_transition,
                                        CPUFREQ_TRANSITION_NOTIFIER);
-       if (unlikely(ret < 0))
-               goto err_unreg;
+       if (unlikely(ret < 0)) {
+               sci_cleanup_single(sp);
+               return ret;
+       }
 
 #ifdef CONFIG_SH_STANDARD_BIOS
        sh_bios_gdb_detach();
 #endif
 
        return 0;
-
-err_unreg:
-       sci_remove(dev);
-       return ret;
 }
 
 static int sci_suspend(struct device *dev)
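
Note: the new sci_cleanup_single() pairs with sci_init_single() so that probe failures unwind exactly what init set up, instead of routing errors through the full sci_remove() path. A schematic sketch of that init/cleanup pairing (illustrative names, not the driver's):

#include <errno.h>
#include <stdio.h>

static int port_init(void)
{
        printf("claim clocks, gpios, runtime PM\n");
        return 0;
}

static void port_cleanup(void)
{
        printf("release gpios, clocks, runtime PM\n");
}

static int port_probe(void)
{
        int ret = port_init();

        if (ret)
                return ret;

        ret = -EIO;                     /* pretend port registration failed */
        if (ret) {
                port_cleanup();         /* unwind only what port_init() did */
                return ret;
        }
        return 0;
}

int main(void)
{
        return port_probe() == -EIO ? 0 : 1;
}
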
index 5ed0daae65647c366dc9948307440795b8eda39b..593d40ad0a6be9b0de161803aa69c9011e2fdab8 100644 (file)
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                        printk("%s(%d):block_til_ready blocking on %s count=%d\n",
                                 __FILE__,__LINE__, tty->driver->name, port->count );
                                 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        
        set_current_state(TASK_RUNNING);
index 45b43f11ca3927df3c1c680076c883808f600db9..aa1debf97cc741e3f5914cb5396c8397bd37f362 100644 (file)
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                }
 
                DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
 
        set_current_state(TASK_RUNNING);
index 4a1e4f07765bbce2872b394560d3cc0c93a1ece4..a3dddc12d2fedc3ec261c2c8f2b3da215f564a20 100644 (file)
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                        printk("%s(%d):%s block_til_ready() count=%d\n",
                                 __FILE__,__LINE__, tty->driver->name, port->count );
 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
 
        set_current_state(TASK_RUNNING);
index 9e930c009bf23b49cd7c4efae3d4ccc427444656..b425c79675ad96adc187c601e36d65c2bebc3581 100644 (file)
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
                put_device(tty->dev);
        kfree(tty->write_buf);
        tty_buffer_free_all(tty);
-       tty->magic = 0xDEADDEAD;
        kfree(tty);
 }
 
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
        }
        spin_unlock(&redirect_lock);
 
-       tty_lock(tty);
+       tty_lock();
 
        /* some functions below drop BTM, so we need this bit */
        set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
        clear_bit(TTY_HUPPING, &tty->flags);
        tty_ldisc_enable(tty);
 
-       tty_unlock(tty);
+       tty_unlock();
 
        if (f)
                fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 {
        if (tty) {
                mutex_lock(&tty->atomic_write_lock);
-               tty_lock(tty);
+               tty_lock();
                if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
-                       tty_unlock(tty);
+                       tty_unlock();
                        tty->ops->write(tty, msg, strlen(msg));
                } else
-                       tty_unlock(tty);
+                       tty_unlock();
                tty_write_unlock(tty);
        }
        return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        }
        initialize_tty_struct(tty, driver, idx);
 
-       tty_lock(tty);
        retval = tty_driver_install_tty(driver, tty);
        if (retval < 0)
                goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        retval = tty_ldisc_setup(tty, tty->link);
        if (retval)
                goto err_release_tty;
-       /* Return the tty locked so that it cannot vanish under the caller */
        return tty;
 
 err_deinit_tty:
-       tty_unlock(tty);
        deinitialize_tty_struct(tty);
        free_tty_struct(tty);
 err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
 
        /* call the tty release_tty routine to clean out this slot */
 err_release_tty:
-       tty_unlock(tty);
        printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
                                 "clearing slot %d\n", idx);
        release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
        if (tty_paranoia_check(tty, inode, __func__))
                return 0;
 
-       tty_lock(tty);
+       tty_lock();
        check_tty_count(tty, __func__);
 
        __tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
        pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
                      tty->driver->subtype == PTY_TYPE_MASTER);
        devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
-       /* Review: parallel close */
        o_tty = tty->link;
 
        if (tty_release_checks(tty, o_tty, idx)) {
-               tty_unlock(tty);
+               tty_unlock();
                return 0;
        }
 
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
        if (tty->ops->close)
                tty->ops->close(tty, filp);
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         * Sanity check: if tty->count is going to zero, there shouldn't be
         * any waiters on tty->read_wait or tty->write_wait.  We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
                   opens on /dev/tty */
 
                mutex_lock(&tty_mutex);
-               tty_lock_pair(tty, o_tty);
+               tty_lock();
                tty_closing = tty->count <= 1;
                o_tty_closing = o_tty &&
                        (o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
 
                printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
                                __func__, tty_name(tty, buf));
-               tty_unlock_pair(tty, o_tty);
+               tty_unlock();
                mutex_unlock(&tty_mutex);
                schedule();
        }
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
 
        /* check whether both sides are closing ... */
        if (!tty_closing || (o_tty && !o_tty_closing)) {
-               tty_unlock_pair(tty, o_tty);
+               tty_unlock();
                return 0;
        }
 
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
        tty_ldisc_release(tty, o_tty);
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        release_tty(tty, idx);
-       tty_unlock_pair(tty, o_tty);
 
        /* Make this pty number available for reallocation */
        if (devpts)
                devpts_kill_index(inode, idx);
+       tty_unlock();
        return 0;
 }
 
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
  *     Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
  *              tty->count should protect the rest.
  *              ->siglock protects ->signal/->sighand
- *
- *     Note: the tty_unlock/lock cases without a ref are only safe due to
- *     tty_mutex
  */
 
 static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
        retval = 0;
 
        mutex_lock(&tty_mutex);
-       /* This is protected by the tty_mutex */
+       tty_lock();
+
        tty = tty_open_current_tty(device, filp);
        if (IS_ERR(tty)) {
                retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
        }
 
        if (tty) {
-               tty_lock(tty);
                retval = tty_reopen(tty);
-               if (retval < 0) {
-                       tty_unlock(tty);
+               if (retval)
                        tty = ERR_PTR(retval);
-               }
-       } else  /* Returns with the tty_lock held for now */
+       } else
                tty = tty_init_dev(driver, index);
 
        mutex_unlock(&tty_mutex);
        if (driver)
                tty_driver_kref_put(driver);
        if (IS_ERR(tty)) {
+               tty_unlock();
                retval = PTR_ERR(tty);
                goto err_file;
        }
@@ -1989,7 +1977,7 @@ retry_open:
                printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
                                retval, tty->name);
 #endif
-               tty_unlock(tty); /* need to call tty_release without BTM */
+               tty_unlock(); /* need to call tty_release without BTM */
                tty_release(inode, filp);
                if (retval != -ERESTARTSYS)
                        return retval;
@@ -2001,15 +1989,17 @@ retry_open:
                /*
                 * Need to reset f_op in case a hangup happened.
                 */
+               tty_lock();
                if (filp->f_op == &hung_up_tty_fops)
                        filp->f_op = &tty_fops;
+               tty_unlock();
                goto retry_open;
        }
-       tty_unlock(tty);
+       tty_unlock();
 
 
        mutex_lock(&tty_mutex);
-       tty_lock(tty);
+       tty_lock();
        spin_lock_irq(&current->sighand->siglock);
        if (!noctty &&
            current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
            tty->session == NULL)
                __proc_set_tty(current, tty);
        spin_unlock_irq(&current->sighand->siglock);
-       tty_unlock(tty);
+       tty_unlock();
        mutex_unlock(&tty_mutex);
        return 0;
 err_unlock:
+       tty_unlock();
        mutex_unlock(&tty_mutex);
        /* after locks to avoid deadlock */
        if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
 
 static int tty_fasync(int fd, struct file *filp, int on)
 {
-       struct tty_struct *tty = file_tty(filp);
        int retval;
-
-       tty_lock(tty);
+       tty_lock();
        retval = __tty_fasync(fd, filp, on);
-       tty_unlock(tty);
-
+       tty_unlock();
        return retval;
 }
 
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
        tty->pgrp = NULL;
        tty->overrun_time = jiffies;
        tty_buffer_init(tty);
-       mutex_init(&tty->legacy_mutex);
        mutex_init(&tty->termios_mutex);
        mutex_init(&tty->ldisc_mutex);
        init_waitqueue_head(&tty->write_wait);
index ba8be396a6215339200c5ff3f79ad34e93489acb..9911eb6b34cd06c772cadf9761647b1dc5828487 100644 (file)
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
        if (IS_ERR(new_ldisc))
                return PTR_ERR(new_ldisc);
 
-       tty_lock(tty);
+       tty_lock();
        /*
         *      We need to look at the tty locking here for pty/tty pairs
         *      when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
         */
 
        if (tty->ldisc->ops->num == ldisc) {
-               tty_unlock(tty);
+               tty_unlock();
                tty_ldisc_put(new_ldisc);
                return 0;
        }
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         *      Problem: What do we do if this blocks ?
         *      We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        tty_wait_until_sent(tty, 0);
 
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
                mutex_unlock(&tty->ldisc_mutex);
-               tty_unlock(tty);
+               tty_unlock();
                wait_event(tty_ldisc_wait,
                        test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
-               tty_lock(tty);
+               tty_lock();
                mutex_lock(&tty->ldisc_mutex);
        }
 
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        o_ldisc = tty->ldisc;
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         *      Make sure we don't change while someone holds a
         *      reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        retval = tty_ldisc_wait_idle(tty, 5 * HZ);
 
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
                clear_bit(TTY_LDISC_CHANGING, &tty->flags);
                mutex_unlock(&tty->ldisc_mutex);
                tty_ldisc_put(new_ldisc);
-               tty_unlock(tty);
+               tty_unlock();
                return -EIO;
        }
 
@@ -708,7 +708,7 @@ enable:
        if (o_work)
                schedule_work(&o_tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
-       tty_unlock(tty);
+       tty_unlock();
        return retval;
 }
 
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
         * need to wait for another function taking the BTM
         */
        clear_bit(TTY_LDISC, &tty->flags);
-       tty_unlock(tty);
+       tty_unlock();
        cancel_work_sync(&tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
 retry:
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
                if (atomic_read(&tty->ldisc->users) != 1) {
                        char cur_n[TASK_COMM_LEN], tty_n[64];
                        long timeout = 3 * HZ;
-                       tty_unlock(tty);
+                       tty_unlock();
 
                        while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
                                timeout = MAX_SCHEDULE_TIMEOUT;
@@ -894,23 +894,6 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
        tty_ldisc_enable(tty);
        return 0;
 }
-
-static void tty_ldisc_kill(struct tty_struct *tty)
-{
-       mutex_lock(&tty->ldisc_mutex);
-       /*
-        * Now kill off the ldisc
-        */
-       tty_ldisc_close(tty, tty->ldisc);
-       tty_ldisc_put(tty->ldisc);
-       /* Force an oops if we mess this up */
-       tty->ldisc = NULL;
-
-       /* Ensure the next open requests the N_TTY ldisc */
-       tty_set_termios_ldisc(tty, N_TTY);
-       mutex_unlock(&tty->ldisc_mutex);
-}
-
 /**
  *     tty_ldisc_release               -       release line discipline
  *     @tty: tty being shut down
@@ -929,19 +912,27 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
         * race with the set_ldisc code path.
         */
 
-       tty_unlock_pair(tty, o_tty);
+       tty_unlock();
        tty_ldisc_halt(tty);
        tty_ldisc_flush_works(tty);
-       if (o_tty) {
-               tty_ldisc_halt(o_tty);
-               tty_ldisc_flush_works(o_tty);
-       }
-       tty_lock_pair(tty, o_tty);
+       tty_lock();
 
+       mutex_lock(&tty->ldisc_mutex);
+       /*
+        * Now kill off the ldisc
+        */
+       tty_ldisc_close(tty, tty->ldisc);
+       tty_ldisc_put(tty->ldisc);
+       /* Force an oops if we mess this up */
+       tty->ldisc = NULL;
+
+       /* Ensure the next open requests the N_TTY ldisc */
+       tty_set_termios_ldisc(tty, N_TTY);
+       mutex_unlock(&tty->ldisc_mutex);
 
-       tty_ldisc_kill(tty);
+       /* This will need doing differently if we need to lock */
        if (o_tty)
-               tty_ldisc_kill(o_tty);
+               tty_ldisc_release(o_tty, NULL);
 
        /* And the memory resources remaining (buffers, termios) will be
           disposed of when the kref hits zero */
index 69adc80c98cd5c035fa0f3a18df6fc3e3c127623..9ff986c32a21ef702edf515a79c19440a504b747 100644 (file)
@@ -4,59 +4,29 @@
 #include <linux/semaphore.h>
 #include <linux/sched.h>
 
-/* Legacy tty mutex glue */
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
 
 /*
  * Getting the big tty mutex.
  */
-
-void __lockfunc tty_lock(struct tty_struct *tty)
+void __lockfunc tty_lock(void)
 {
-       if (tty->magic != TTY_MAGIC) {
-               printk(KERN_ERR "L Bad %p\n", tty);
-               WARN_ON(1);
-               return;
-       }
-       tty_kref_get(tty);
-       mutex_lock(&tty->legacy_mutex);
+       mutex_lock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_lock);
 
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void __lockfunc tty_unlock(void)
 {
-       if (tty->magic != TTY_MAGIC) {
-               printk(KERN_ERR "U Bad %p\n", tty);
-               WARN_ON(1);
-               return;
-       }
-       mutex_unlock(&tty->legacy_mutex);
-       tty_kref_put(tty);
+       mutex_unlock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_unlock);
-
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
-                                       struct tty_struct *tty2)
-{
-       if (tty < tty2) {
-               tty_lock(tty);
-               tty_lock(tty2);
-       } else {
-               if (tty2 && tty2 != tty)
-                       tty_lock(tty2);
-               tty_lock(tty);
-       }
-}
-EXPORT_SYMBOL(tty_lock_pair);
-
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-                                               struct tty_struct *tty2)
-{
-       tty_unlock(tty);
-       if (tty2 && tty2 != tty)
-               tty_unlock(tty2);
-}
-EXPORT_SYMBOL(tty_unlock_pair);
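
Note: this file reverts the per-tty legacy_mutex back to one process-wide "big tty mutex", trading concurrency for the simpler lock ordering that the pty/tty pair paths relied on. A userspace sketch of the resulting locking shape, using pthreads purely for illustration:

#include <pthread.h>

/* One global lock: every tty_lock()/tty_unlock() caller serialises here,
 * so there is no per-object lock ordering to get wrong. */
static pthread_mutex_t big_tty_mutex = PTHREAD_MUTEX_INITIALIZER;

void tty_lock_sketch(void)
{
        pthread_mutex_lock(&big_tty_mutex);
}

void tty_unlock_sketch(void)
{
        pthread_mutex_unlock(&big_tty_mutex);
}
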
index d9cca95a5452484fe7951aebb59467d23d7e0ca4..bf6e238146ae40acd4ac8ea2f517574870366590 100644 (file)
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
 
        /* block if port is in the process of being closed */
        if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
-               wait_event_interruptible_tty(tty, port->close_wait,
+               wait_event_interruptible_tty(port->close_wait,
                                !(port->flags & ASYNC_CLOSING));
                if (port->flags & ASYNC_HUP_NOTIFY)
                        return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
                        retval = -ERESTARTSYS;
                        break;
                }
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        finish_wait(&port->open_wait, &wait);
 
index c691eea515375ac1e38f2afaec9ebc8509a2c256..f5ed3d75fa5a2ce615f32b10b7f0461f839e5757 100644 (file)
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK)    += image/
 obj-$(CONFIG_USB_SERIAL)       += serial/
 
 obj-$(CONFIG_USB)              += misc/
-obj-$(CONFIG_USB)              += phy/
+obj-$(CONFIG_USB_COMMON)       += phy/
 obj-$(CONFIG_EARLY_PRINTK_DBGP)        += early/
 
 obj-$(CONFIG_USB_ATM)          += atm/
index f2a120eea9d4872657879dcc00689ff28c41dfed..36a2a0b7b82cc61f2e8427463927149cdd86bf2e 100644 (file)
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
 
        usb_autopm_put_interface(acm->control);
 
+       /*
+        * Unthrottle device in case the TTY was closed while throttled.
+        */
+       spin_lock_irq(&acm->read_lock);
+       acm->throttled = 0;
+       acm->throttle_req = 0;
+       spin_unlock_irq(&acm->read_lock);
+
        if (acm_submit_read_urbs(acm, GFP_KERNEL))
                goto error_submit_read_urbs;
 
index ea8b304f0e853af63651e0d70c5f6d1b69938fe9..ee469274a3fe0d03aed9259f5f4c380330742aa3 100644 (file)
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
                .bInterfaceSubClass = 1,
                .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
        },
+       {
+                /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+               .match_flags        = USB_DEVICE_ID_MATCH_VENDOR |
+                                     USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor           = HUAWEI_VENDOR_ID,
+               .bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
+               .bInterfaceSubClass = 1,
+               .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+       },
        { }
 };
 
@@ -491,6 +500,8 @@ retry:
                        goto retry;
                }
                if (!desc->reslength) { /* zero length read */
+                       dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+                       clear_bit(WDM_READ, &desc->flags);
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
index 57ed9e400c06d938a91fd9713b95a9e97030b3a3..622b4a48e732e7a83b6c760d6563994394b0caa9 100644 (file)
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
 
        pci_save_state(pci_dev);
 
-       /*
-        * Some systems crash if an EHCI controller is in D3 during
-        * a sleep transition.  We have to leave such controllers in D0.
-        */
-       if (hcd->broken_pci_sleep) {
-               dev_dbg(dev, "Staying in PCI D0\n");
-               return retval;
-       }
-
        /* If the root hub is dead rather than suspended, disallow remote
         * wakeup.  usb_hc_died() should ensure that both hosts are marked as
         * dying, so we only need to check the primary roothub.
index 04fb834c3fa1395c4d4df4a90d485929931ea0e3..8fb484984c86c6f6532d4ac8696cd5f6f4a7e5ff 100644 (file)
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
 
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * A warm port reset is required to recover.
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
 {
        return hub_is_superspeed(hub->hdev) &&
-               (portstatus & USB_PORT_STAT_LINK_STATE) ==
-               USB_SS_PORT_LS_SS_INACTIVE;
+               (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_SS_INACTIVE) ||
+                ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_COMP_MOD));
 }
 
 static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                         *
                         * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
                         */
-                       if (hub_port_inactive(hub, portstatus)) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                int ret;
 
                                if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -3379,7 +3383,7 @@ int usb_disable_lpm(struct usb_device *udev)
                return 0;
 
        udev->lpm_disable_count++;
-       if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+       if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
                return 0;
 
        /* If LPM is enabled, attempt to disable it. */
@@ -4408,9 +4412,7 @@ static void hub_events(void)
                        /* Warm reset a USB3 protocol port if it's in
                         * SS.Inactive state.
                         */
-                       if (hub_is_superspeed(hub->hdev) &&
-                               (portstatus & USB_PORT_STAT_LINK_STATE)
-                                       == USB_SS_PORT_LS_SS_INACTIVE) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
                                hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
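
Note: hub_port_warm_reset_required() broadens the old SS.Inactive-only check so that a port parked in Compliance Mode also gets a warm reset. A standalone sketch of the link-state test; the mask and state values here are illustrative placeholders, not copied from ch11.h:

#include <stdbool.h>
#include <stdint.h>

#define LINK_STATE_MASK   0x01e0        /* illustrative value */
#define LS_SS_INACTIVE    0x00c0        /* illustrative value */
#define LS_COMP_MOD       0x0140        /* illustrative value */

/* A warm reset is needed when the SuperSpeed link sits in SS.Inactive or
 * Compliance Mode; a plain hot reset will not bring the link back. */
static bool warm_reset_required(uint16_t portstatus)
{
        uint16_t ls = portstatus & LINK_STATE_MASK;

        return ls == LS_SS_INACTIVE || ls == LS_COMP_MOD;
}

int main(void)
{
        return warm_reset_required(LS_COMP_MOD) ? 0 : 1;
}
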
index b548cf1dbc625c66181c1a6928c160fc597bc2e6..bdd1c6749d88a9206208a27f7cbd7d966ebeb386 100644 (file)
@@ -1838,7 +1838,6 @@ free_interfaces:
                intfc = cp->intf_cache[i];
                intf->altsetting = intfc->altsetting;
                intf->num_altsetting = intfc->num_altsetting;
-               intf->intf_assoc = find_iad(dev, cp, i);
                kref_get(&intfc->ref);
 
                alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
                if (!alt)
                        alt = &intf->altsetting[0];
 
+               intf->intf_assoc =
+                       find_iad(dev, cp, alt->desc.bInterfaceNumber);
                intf->cur_altsetting = alt;
                usb_enable_interface(dev, intf, true);
                intf->dev.parent = &dev->dev;
index 3df1a1973b0559d7bb53fd4ce7eb148d25dff1e7..ec70df7aba17e45635306b0a924625f7c1c59aec 100644 (file)
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                if (r == req) {
                        /* wait until it is processed */
                        dwc3_stop_active_transfer(dwc, dep->number);
-                       goto out0;
+                       goto out1;
                }
                dev_err(dwc->dev, "request %p was not queued to %s\n",
                                request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                goto out0;
        }
 
+out1:
        /* giveback the request */
        dwc3_gadget_giveback(dep, req, -ECONNRESET);
 
index e23bf7984aaf673e966ca358ea85c047f58481a4..9a9bced813ed9194bd8bd8f18b23a3fce1887f3c 100644 (file)
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 
        spin_lock_irqsave(&ep->udc->lock, flags);
 
-       if (ep->ep.desc) {
-               spin_unlock_irqrestore(&ep->udc->lock, flags);
-               DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
-               return -EBUSY;
-       }
-
        ep->ep.desc = desc;
        ep->ep.maxpacket = maxpacket;
 
index 51881f3bd07aa34cb93e937033d6458071fbaddc..b09452d6f33a8d0cbb382308cc7cb09fa2958eea 100644 (file)
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct qe_ep, ep);
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+       if (!_ep || !desc || _ep->name == ep_name[0] ||
                        (desc->bDescriptorType != USB_DT_ENDPOINT))
                return -EINVAL;
 
index 4c07ca9cebf355741736d8a60306bd58a47d9e5b..7026919fc9014a1be634a3003248da199b7ecb5d 100644 (file)
@@ -153,10 +153,10 @@ struct usb_ep_para{
 #define USB_BUSMODE_DTB                0x02
 
 /* Endpoint basic handle */
-#define ep_index(EP)           ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP)           ((EP)->ep.desc->bEndpointAddress & 0xF)
 #define ep_maxpacket(EP)       ((EP)->ep.maxpacket)
 #define ep_is_in(EP)   ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-                       USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+                       USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
                        & USB_DIR_IN) == USB_DIR_IN)
 
 /* ep0 transfer state */
index 28316858208bfeed10ab933b770295a9e6619b50..bc6f9bb9994a61a08d7579f59217aa764e0ec09b 100644 (file)
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct fsl_ep, ep);
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || (desc->bDescriptorType != USB_DT_ENDPOINT))
                return -EINVAL;
 
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
        /* for ep0: the desc defined here;
         * for other eps, gadget layer called ep_enable with defined desc
         */
-       udc_controller->eps[0].desc = &fsl_ep0_desc;
+       udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
        udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
 
        /* setup the udc->eps[] for non-control endpoints and link
index 5cd7b7e7ddb4e0488108730434ec224c4bd40e9e..f61a967f70828dd21c7c7e401388610e6b62ce74 100644 (file)
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
 /*
  * ### internal used help routines.
  */
-#define ep_index(EP)           ((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP)           ((EP)->ep.desc->bEndpointAddress&0xF)
 #define ep_maxpacket(EP)       ((EP)->ep.maxpacket)
 #define ep_is_in(EP)   ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-                       USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+                       USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
                        & USB_DIR_IN)==USB_DIR_IN)
 #define get_ep_by_pipe(udc, pipe)      ((pipe == 1)? &udc->eps[0]: \
                                        &udc->eps[pipe])
index b241e6c6a7f2d545197ad5145bbecf07090dae2b..3d28fb976c7821898ea44fc4c8ac4656011d546a 100644 (file)
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        unsigned long   flags;
 
        ep = container_of(_ep, struct goku_ep, ep);
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
        dev = ep->dev;
index 262acfd53e32b04f9d98f2c36d6adc84b64ef06a..2ab0388d93ebc06a4e11ac29988bdf5cca32b39d 100644 (file)
@@ -61,6 +61,7 @@
 #include <mach/irqs.h>
 #include <mach/board.h>
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #endif
 
index dbcd1329495ef015ed097ef8fec074ac71371011..117a4bba1b8c26a32b22e09400f6f346e531732d 100644 (file)
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct mv_ep, ep);
        udc = ep->udc;
 
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
 
index 7ba32469c5bdc789ac55abbe883b288bc1607c19..a460e8c204f42c9bacbac82c55c83bf9896e6672 100644 (file)
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
        u16             maxp;
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT
                        || ep->bEndpointAddress != desc->bEndpointAddress
                        || ep->maxpacket < usb_endpoint_maxp(desc)) {
index d7c8cb3bf759f66e02daecda062da3f0c4e0ac90..f7ff9e8e746a796f944713414ecb8acb43248f2a 100644 (file)
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
        struct pxa25x_udc       *dev;
 
        ep = container_of (_ep, struct pxa25x_ep, ep);
-       if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+       if (!_ep || !desc || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT
                        || ep->bEndpointAddress != desc->bEndpointAddress
                        || ep->fifo_size < usb_endpoint_maxp (desc)) {
index 36c6836eeb0f93818be4eb01367863db2e9e6ed8..236b271871a0bde44e353ebe5650c705bf48f163 100644 (file)
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
        u32 ecr = 0;
 
        hsep = our_ep(_ep);
-       if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+       if (!_ep || !desc || _ep->name == ep0name
                || desc->bDescriptorType != USB_DT_ENDPOINT
                || hsep->bEndpointAddress != desc->bEndpointAddress
                || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
index 3de71d37d75e209787f2dfaa0e783f8952808527..f2e51f50e528d8b532d7fcac3558c58e8554f382 100644 (file)
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
 
        ep = to_s3c2410_ep(_ep);
 
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
index b100f5f9f4b63a3b9ad75f2cb39341109a14cf28..800be38c78b47f5d7990243bc71bd8fb14c2bdc6 100644 (file)
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
        hw = ehci->async->hw;
        hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
        hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
        hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7));    /* I = 1 */
+#endif
        hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
        hw->hw_qtd_next = EHCI_LIST_END(ehci);
        ehci->async->qh_state = QH_STATE_LINKED;
index a44294d13494a8c985a887db01e07a6cbe984277..c30435499a029de5ea40c24bfe6148feeadbebf5 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/gpio.h>
+#include <linux/clk.h>
 
 /* EHCI Register Set */
 #define EHCI_INSNREG04                                 (0xA0)
 #define        EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT             8
 #define        EHCI_INSNREG05_ULPI_WRDATA_SHIFT                0
 
+/* Errata i693 */
+static struct clk      *utmi_p1_fck;
+static struct clk      *utmi_p2_fck;
+static struct clk      *xclk60mhsp1_ck;
+static struct clk      *xclk60mhsp2_ck;
+static struct clk      *usbhost_p1_fck;
+static struct clk      *usbhost_p2_fck;
+static struct clk      *init_60m_fclk;
+
 /*-------------------------------------------------------------------------*/
 
 static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
        return __raw_readl(base + reg);
 }
 
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+       int ret = 0;
+
+       /* Switch to the internal 60 MHz clock */
+       ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+       if (ret != 0)
+               ehci_err(ehci, "init_60m_fclk set parent "
+                       "failed, error: %d\n", ret);
+
+       ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+       if (ret != 0)
+               ehci_err(ehci, "init_60m_fclk set parent "
+                       "failed, error: %d\n", ret);
+
+       clk_enable(usbhost_p1_fck);
+       clk_enable(usbhost_p2_fck);
+
+       /* Wait 1ms and switch back to the external clock */
+       mdelay(1);
+       ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+       if (ret != 0)
+               ehci_err(ehci, "xclk60mhsp1_ck set parent "
+                       "failed, error: %d\n", ret);
+
+       ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+       if (ret != 0)
+               ehci_err(ehci, "xclk60mhsp2_ck set parent "
+                       "failed, error: %d\n", ret);
+
+       clk_disable(usbhost_p1_fck);
+       clk_disable(usbhost_p2_fck);
+}
+
 static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
 {
        struct usb_hcd  *hcd = dev_get_drvdata(&pdev->dev);
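
The erratum i693 workaround added above repeats the same clk_set_parent()-plus-error-print block four times. Purely as an illustration, not part of the patch, the repetition could be folded into a small helper in this same file; it relies only on the clk_set_parent() and ehci_err() interfaces already used in the hunk, and on the file's existing linux/clk.h include:

static void omap_ehci_reparent(struct ehci_hcd *ehci, struct clk *clk,
                               struct clk *parent, const char *name)
{
        int ret = clk_set_parent(clk, parent);

        /* log and continue, as the workaround above does */
        if (ret)
                ehci_err(ehci, "%s: clk_set_parent failed, error: %d\n",
                         name, ret);
}

The workaround body would then reduce to two such calls per direction, for example omap_ehci_reparent(ehci, utmi_p1_fck, init_60m_fclk, "utmi_p1_fck"), framed by the same clk_enable()/clk_disable() and mdelay(1) steps.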
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
        }
 }
 
+static int omap_ehci_hub_control(
+       struct usb_hcd  *hcd,
+       u16             typeReq,
+       u16             wValue,
+       u16             wIndex,
+       char            *buf,
+       u16             wLength
+)
+{
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       u32 __iomem *status_reg = &ehci->regs->port_status[
+                               (wIndex & 0xff) - 1];
+       u32             temp;
+       unsigned long   flags;
+       int             retval = 0;
+
+       spin_lock_irqsave(&ehci->lock, flags);
+
+       if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+               temp = ehci_readl(ehci, status_reg);
+               if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+                       retval = -EPIPE;
+                       goto done;
+               }
+
+               temp &= ~PORT_WKCONN_E;
+               temp |= PORT_WKDISC_E | PORT_WKOC_E;
+               ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+               omap_ehci_erratum_i693(ehci);
+
+               set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&ehci->lock, flags);
+
+       /* All other requests are handled by the generic EHCI hub code */
+       return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+       spin_unlock_irqrestore(&ehci->lock, flags);
+       return retval;
+}
+
 static void disable_put_regulator(
                struct ehci_hcd_omap_platform_data *pdata)
 {
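
omap_ehci_hub_control() above follows the wrap-and-delegate pattern: intercept exactly one request, SetPortFeature with USB_PORT_FEAT_SUSPEND, to run the erratum workaround, and hand everything else to the stock ehci_hub_control(); the matching .hub_control pointer swap appears in a later hunk. The skeleton of that pattern, stripped to its shape and with a hypothetical function name:

/* Sketch only: intercept one request, delegate the rest to the stock handler. */
static int wrapped_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                               u16 wIndex, char *buf, u16 wLength)
{
        if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
                /* vendor-specific suspend handling goes here */
                return 0;
        }
        return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
}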
@@ -192,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                }
        }
 
+       /* Hold PHYs in reset while initializing EHCI controller */
        if (pdata->phy_reset) {
                if (gpio_is_valid(pdata->reset_gpio_port[0]))
-                       gpio_request_one(pdata->reset_gpio_port[0],
-                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
 
                if (gpio_is_valid(pdata->reset_gpio_port[1]))
-                       gpio_request_one(pdata->reset_gpio_port[1],
-                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
 
                /* Hold the PHY in RESET for enough time till DIR is high */
                udelay(10);
@@ -241,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
 
        ehci_reset(omap_ehci);
+       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       if (ret) {
+               dev_err(dev, "failed to add hcd with err %d\n", ret);
+               goto err_add_hcd;
+       }
 
        if (pdata->phy_reset) {
                /* Hold the PHY in RESET for enough time till
@@ -255,17 +348,79 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                        gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
        }
 
-       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
-       if (ret) {
-               dev_err(dev, "failed to add hcd with err %d\n", ret);
+       /* root ports should always stay powered */
+       ehci_port_power(omap_ehci, 1);
+
+       /* get clocks */
+       utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+       if (IS_ERR(utmi_p1_fck)) {
+               ret = PTR_ERR(utmi_p1_fck);
+               dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
                goto err_add_hcd;
        }
 
-       /* root ports should always stay powered */
-       ehci_port_power(omap_ehci, 1);
+       xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+       if (IS_ERR(xclk60mhsp1_ck)) {
+               ret = PTR_ERR(xclk60mhsp1_ck);
+               dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+               goto err_utmi_p1_fck;
+       }
+
+       utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+       if (IS_ERR(utmi_p2_fck)) {
+               ret = PTR_ERR(utmi_p2_fck);
+               dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+               goto err_xclk60mhsp1_ck;
+       }
+
+       xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+       if (IS_ERR(xclk60mhsp2_ck)) {
+               ret = PTR_ERR(xclk60mhsp2_ck);
+               dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+               goto err_utmi_p2_fck;
+       }
+
+       usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+       if (IS_ERR(usbhost_p1_fck)) {
+               ret = PTR_ERR(usbhost_p1_fck);
+               dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+               goto err_xclk60mhsp2_ck;
+       }
+
+       usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+       if (IS_ERR(usbhost_p2_fck)) {
+               ret = PTR_ERR(usbhost_p2_fck);
+               dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+               goto err_usbhost_p1_fck;
+       }
+
+       init_60m_fclk = clk_get(dev, "init_60m_fclk");
+       if (IS_ERR(init_60m_fclk)) {
+               ret = PTR_ERR(init_60m_fclk);
+               dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+               goto err_usbhost_p2_fck;
+       }
 
        return 0;
 
+err_usbhost_p2_fck:
+       clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+       clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+       clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+       clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+       clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+       clk_put(utmi_p1_fck);
+
 err_add_hcd:
        disable_put_regulator(pdata);
        pm_runtime_put_sync(dev);
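
The seven clk_get() calls added to the probe path above use the usual goto-unwind convention: resources are acquired in order, each successful clk_get() gains a matching clk_put() label, and the labels run in reverse acquisition order so a failure at step N releases steps N-1 down to 1. A schematic of the pattern, where the clock names are placeholders rather than anything taken from this driver:

static int probe_sketch(struct device *dev)
{
        struct clk *a, *b;
        int ret;

        a = clk_get(dev, "clock_a");            /* hypothetical clock name */
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = clk_get(dev, "clock_b");            /* hypothetical clock name */
        if (IS_ERR(b)) {
                ret = PTR_ERR(b);
                goto err_a;                     /* unwind what succeeded */
        }

        return 0;

err_a:
        clk_put(a);
        return ret;
}

Where available, devm_clk_get() removes the ladder entirely by tying the clk_put() to the device's lifetime; whether that was an option for this driver at the time is not something the diff shows.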
@@ -294,6 +449,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
        disable_put_regulator(dev->platform_data);
        iounmap(hcd->regs);
        usb_put_hcd(hcd);
+
+       clk_put(utmi_p1_fck);
+       clk_put(utmi_p2_fck);
+       clk_put(xclk60mhsp1_ck);
+       clk_put(xclk60mhsp2_ck);
+       clk_put(usbhost_p1_fck);
+       clk_put(usbhost_p2_fck);
+       clk_put(init_60m_fclk);
+
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
 
@@ -364,7 +528,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
         * root hub support
         */
        .hub_status_data        = ehci_hub_status_data,
-       .hub_control            = ehci_hub_control,
+       .hub_control            = omap_ehci_hub_control,
        .bus_suspend            = ehci_bus_suspend,
        .bus_resume             = ehci_bus_resume,
 
index bc94d7bf072d822fb32da0867b6759dec223f69a..123481793a47afcdfc22cf476a8a8ed14301c6e1 100644 (file)
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                        hcd->has_tt = 1;
                        tdi_reset(ehci);
                }
-               if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
-                       /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
-                       if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
-                               ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
-                               hcd->broken_pci_sleep = 1;
-                               device_set_wakeup_capable(&pdev->dev, false);
-                       }
-               }
                break;
        case PCI_VENDOR_ID_TDI:
                if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
index ca819cdd0c5ecb895929ae2fbf7a6a7f2b3664bc..e7cb3925abf8e0276dc4c119e8e03116d0ea48bd 100644 (file)
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
                goto fail_create_hcd;
        }
 
-       if (pdev->dev.platform_data != NULL)
-               pdata = pdev->dev.platform_data;
+       pdata = pdev->dev.platform_data;
 
        /* initialize hcd */
        hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
index 9c2cc4633894b152047ed587b2cf9494d5fdc684..e9713d589e30d20c9a7c4d10b253e431592692b0 100644 (file)
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
  *
  * Properly shutdown the hcd, call driver's shutdown routine.
  */
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
 {
        struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
-
-       return 0;
 }
 
 
index 836772dfabd3e0deb077f0441981feb29c462d4b..2f3619eefefa9bca87189e5da44dd6bbb0cd8191 100644 (file)
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
 }
 
 /* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
 {
        struct ohci_hcd         *ohci = hcd_to_ohci(hcd);
        int                     port;
index 2732ef660c5c08b85baeb38c23d5ec597bf15673..7b01094d7993957dba556c4231b781a524ad72e9 100644 (file)
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
        }
 }
 
+/* Update the link status reported for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+       u32 pls = status_reg & PORT_PLS_MASK;
+
+       /* The resume state is an xHCI-internal state.
+        * Do not report it to the USB core.
+        */
+       if (pls == XDEV_RESUME)
+               return;
+
+       /* When the CAS bit is set, a warm reset
+        * should be performed on the port.
+        */
+       if (status_reg & PORT_CAS) {
+               /* The CAS bit can be set while the port is
+                * in any link state.
+                * Only roothubs have CAS bit, so we
+                * pretend to be in compliance mode
+                * unless we're already in compliance
+                * or the inactive state.
+                */
+               if (pls != USB_SS_PORT_LS_COMP_MOD &&
+                   pls != USB_SS_PORT_LS_SS_INACTIVE) {
+                       pls = USB_SS_PORT_LS_COMP_MOD;
+               }
+               /* Return also connection bit -
+                * hub state machine resets port
+                * when this bit is set.
+                */
+               pls |= USB_PORT_STAT_CONNECTION;
+       }
+       /* update status field */
+       *status |= pls;
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        else
                                status |= USB_PORT_STAT_POWER;
                }
-               /* Port Link State */
+               /* Update Port Link State for SuperSpeed ports */
                if (hcd->speed == HCD_USB3) {
-                       /* resume state is a xHCI internal state.
-                        * Do not report it to usb core.
-                        */
-                       if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
-                               status |= (temp & PORT_PLS_MASK);
+                       xhci_hub_report_link_state(&status, temp);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
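
Taken together, the two hunks above move the SuperSpeed link-state reporting into a helper that hides the xHCI-internal resume state and, when the Cold Attach Status bit is set, reports compliance mode plus a connection so the hub state machine issues the warm reset the hardware needs. The standalone sketch below reproduces only that decision logic; the #define values are placeholders for the real definitions in xhci.h and ch11.h, not the actual constants:

#include <assert.h>
#include <stdint.h>

#define PLS_MASK        (0x0fu << 5)    /* placeholder values throughout */
#define PLS_RESUME      (0x0fu << 5)
#define PLS_U3          (0x03u << 5)
#define PLS_COMP_MOD    (0x0au << 5)
#define PLS_SS_INACTIVE (0x06u << 5)
#define CAS_BIT         (1u << 24)
#define STAT_CONNECTION (1u << 0)

static void report_link_state(uint32_t *status, uint32_t portsc)
{
        uint32_t pls = portsc & PLS_MASK;

        if (pls == PLS_RESUME)          /* internal state: not reported */
                return;

        if (portsc & CAS_BIT) {
                if (pls != PLS_COMP_MOD && pls != PLS_SS_INACTIVE)
                        pls = PLS_COMP_MOD;
                pls |= STAT_CONNECTION; /* makes the hub code warm-reset the port */
        }
        *status |= pls;
}

int main(void)
{
        uint32_t status = 0;

        report_link_state(&status, CAS_BIT | PLS_U3);   /* CAS seen while in U3 */
        assert((status & PLS_MASK) == PLS_COMP_MOD);
        assert(status & STAT_CONNECTION);
        return 0;
}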
index ec4338eec8262c520d785e53e32f269b02dfabd8..77689bd64cace84001204bb21cc55085c18d94be 100644 (file)
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                int slot_id)
 {
-       struct list_head *tt;
        struct list_head *tt_list_head;
-       struct list_head *tt_next;
-       struct xhci_tt_bw_info *tt_info;
+       struct xhci_tt_bw_info *tt_info, *next;
+       bool slot_found = false;
 
        /* If the device never made it past the Set Address stage,
         * it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
        }
 
        tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
-       if (list_empty(tt_list_head))
-               return;
-
-       list_for_each(tt, tt_list_head) {
-               tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-               if (tt_info->slot_id == slot_id)
+       list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+               /* Multi-TT hubs will have more than one entry */
+               if (tt_info->slot_id == slot_id) {
+                       slot_found = true;
+                       list_del(&tt_info->tt_list);
+                       kfree(tt_info);
+               } else if (slot_found) {
                        break;
+               }
        }
-       /* Cautionary measure in case the hub was disconnected before we
-        * stored the TT information.
-        */
-       if (tt_info->slot_id != slot_id)
-               return;
-
-       tt_next = tt->next;
-       tt_info = list_entry(tt, struct xhci_tt_bw_info,
-                       tt_list);
-       /* Multi-TT hubs will have more than one entry */
-       do {
-               list_del(tt);
-               kfree(tt_info);
-               tt = tt_next;
-               if (list_empty(tt_list_head))
-                       break;
-               tt_next = tt->next;
-               tt_info = list_entry(tt, struct xhci_tt_bw_info,
-                               tt_list);
-       } while (tt_info->slot_id == slot_id);
 }
 
 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
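
The rewrite above replaces an open-coded walk with list_for_each_entry_safe(), whose defining property is that the next element is fetched before the loop body runs, so the current entry can be unlinked and freed inside the loop; the slot_found flag lets the walk stop once it has passed the contiguous entries for that slot. The same idea in a standalone, userspace form, using plain pointers instead of list_head:

#include <stdio.h>
#include <stdlib.h>

struct tt_node {
        int slot_id;
        struct tt_node *next;
};

/* Free every node matching slot_id; 'next' is saved before the body runs,
 * mirroring what list_for_each_entry_safe() does with its scratch cursor. */
static struct tt_node *free_slot(struct tt_node *head, int slot_id)
{
        struct tt_node *cur, *next, **link = &head;

        for (cur = head; cur; cur = next) {
                next = cur->next;
                if (cur->slot_id == slot_id) {
                        *link = next;           /* unlink, then free safely */
                        free(cur);
                } else {
                        link = &cur->next;
                }
        }
        return head;
}

int main(void)
{
        struct tt_node *head = NULL, *n;
        int ids[] = { 1, 2, 2, 3 };

        for (int i = 3; i >= 0; i--) {
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->slot_id = ids[i];
                n->next = head;
                head = n;
        }
        head = free_slot(head, 2);
        for (n = head; n; n = n->next)
                printf("%d\n", n->slot_id);     /* prints 1 then 3 */
        return 0;
}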
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        struct dev_info *dev_info, *next;
-       struct list_head *tt_list_head;
-       struct list_head *tt;
-       struct list_head *endpoints;
-       struct list_head *ep, *q;
-       struct xhci_tt_bw_info *tt_info;
-       struct xhci_interval_bw_table *bwt;
-       struct xhci_virt_ep *virt_ep;
-
        unsigned long   flags;
        int size;
-       int i;
+       int i, j, num_ports;
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
 
-       bwt = &xhci->rh_bw->bw_table;
-       for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
-               endpoints = &bwt->interval_bw[i].endpoints;
-               list_for_each_safe(ep, q, endpoints) {
-                       virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
-                       list_del(&virt_ep->bw_endpoint_list);
-                       kfree(virt_ep);
+       num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+       for (i = 0; i < num_ports; i++) {
+               struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+               for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+                       struct list_head *ep = &bwt->interval_bw[j].endpoints;
+                       while (!list_empty(ep))
+                               list_del_init(ep->next);
                }
        }
 
-       tt_list_head = &xhci->rh_bw->tts;
-       list_for_each_safe(tt, q, tt_list_head) {
-               tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-               list_del(tt);
-               kfree(tt_info);
+       for (i = 0; i < num_ports; i++) {
+               struct xhci_tt_bw_info *tt, *n;
+               list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+                       list_del(&tt->tt_list);
+                       kfree(tt);
+               }
        }
 
        xhci->num_usb2_ports = 0;
index 23b4aefd103609bcf91b8341a6758a9fc4c3b618..8275645889da4ce779c08c88ac4ea06473f222e0 100644 (file)
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;
 
+       /* If we get two back-to-back stalls, and the first stalled transfer
+        * ends just before a link TRB, the dequeue pointer will be left on
+        * the link TRB by the code in the while loop.  So we have to update
+        * the dequeue pointer one segment further, or we'll jump off
+        * the segment into la-la-land.
+        */
+       if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+               ep_ring->deq_seg = ep_ring->deq_seg->next;
+               ep_ring->dequeue = ep_ring->deq_seg->trbs;
+       }
+
        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
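
The comment added above explains the failure mode: after two back-to-back stalls the dequeue pointer can be parked on a segment's trailing link TRB, so it must first be stepped onto the next segment before the while loop walks the ring. A standalone sketch of that "step off the link TRB" move, with hypothetical types standing in for the xHCI ring structures:

#define TRBS_PER_SEGMENT 16             /* placeholder segment size */

struct segment {
        int trbs[TRBS_PER_SEGMENT];     /* stand-in for real TRBs */
        struct segment *next;           /* segments are chained into a ring */
};

struct ring {
        struct segment *deq_seg;
        int *dequeue;
};

/* If the dequeue pointer sits on the segment's last entry (the link TRB in
 * the real ring), move it to the first entry of the next segment. */
static void step_past_link(struct ring *ring)
{
        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
        }
}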
index afdc73ee84a61461326eada42fe2c110f414f793..a979cd0dbe0f8f61970c694b2821643fd8f5a09b 100644 (file)
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
-       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
-               xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+               xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
-                             STS_RESTORE, 0, 10*100)) {
-                       xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+                             STS_RESTORE, 0, 10 * 1000)) {
+                       xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
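
Both hunks above raise the handshake() poll budget from 10*100 to 10*1000 and reword the warnings; the helper's final argument appears to be a microsecond budget, so the change extends the wait for the controller's save/restore handshake from roughly 1 ms to the intended 10 ms. A generic poll-until-mask-matches loop of the same shape, written as a standalone sketch with a caller-supplied delay callback:

#include <errno.h>
#include <stdint.h>

/* delay_1us is a placeholder for a platform-provided 1-microsecond delay. */
static int poll_handshake(const volatile uint32_t *reg, uint32_t mask,
                          uint32_t done, int usec, void (*delay_1us)(void))
{
        while (usec-- > 0) {
                if ((*reg & mask) == done)      /* condition met in time */
                        return 0;
                delay_1us();
        }
        return -ETIMEDOUT;
}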
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
        default:
                dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
                                __func__);
-               return -EINVAL;
+               return USB3_LPM_DISABLED;
        }
 
        if (sel <= max_sel_pel && pel <= max_sel_pel)
index de3d6e3e57be4b12e840f5a168b439b151fe84fc..55c0785810c99fb5286e29d39cd4d7ff2b98addb 100644 (file)
@@ -341,7 +341,11 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS       (1 << 24)
 /* wake on connect (enable) */
 #define PORT_WKCONN_E  (1 << 25)
 /* wake on disconnect (enable) */
index 768b4b55c816a6960733f73d7e0d7b86be54697f..9d63ba4d10d6fe1500cf1b73e3ccf5e3c111f952 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/dma-mapping.h>
 
 #include <mach/cputype.h>
+#include <mach/hardware.h>
 
 #include <asm/mach-types.h>
 
index 046c84433cadc92e6f8f7d8788520d73f91e4380..371baa0ee509020241b33b1f9af66a60749c5571 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 /* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR       (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR       0x01c40034
 #define USBPHY_DATAPOL         BIT(11) /* (dm355) switch D+/D- */
 #define USBPHY_PHYCLKGD                BIT(8)
 #define USBPHY_SESNDEN         BIT(7)  /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
 #define USBPHY_OTGPDWN         BIT(1)
 #define USBPHY_PHYPDWN         BIT(0)
 
-#define DM355_DEEPSLEEP_PADDR  (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR  0x01c40048
 #define DRVVBUS_FORCE          BIT(2)
 #define DRVVBUS_OVERRIDE       BIT(1)
 
index f42c29b11f713ddbb95f7a6115e27e2921325dd8..95918dacc99ad8496d80409f18e6cf74aeffa065 100644 (file)
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
        }
 
        musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
 
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);
index ef8d744800ac29c58dc24e089a645211fcd665ff..e090c799d87bcf18c5d869b1762d2b7f5feea9fa 100644 (file)
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
         */
        if (list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;
+               struct dma_controller   *dma = musb->dma_controller;
 
-               if (is_in)
+               if (is_in) {
                        ep->rx_reinit = 1;
-               else
+                       if (ep->rx_channel) {
+                               dma->channel_release(ep->rx_channel);
+                               ep->rx_channel = NULL;
+                       }
+               } else {
                        ep->tx_reinit = 1;
+                       if (ep->tx_channel) {
+                               dma->channel_release(ep->tx_channel);
+                               ep->tx_channel = NULL;
+                       }
+               }
 
                /* Clobber old pointers to this qh */
                musb_ep_set_qh(ep, is_in, NULL);
index d2a9a8e691b9dd47ea6dbeca37c870e028d5dd1e..0eabb049b6a94efb7fbb14d16cd42b3c9c0d212d 100644 (file)
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
 
                regulator_enable(twl->usb3v3);
                twl->asleep = 1;
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x10);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = USB_EVENT_ID;
                otg->default_a = true;
                twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                atomic_notifier_call_chain(&twl->phy.notifier, status,
                                                        otg->gadget);
        } else  {
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
-                                                               0x10);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x1);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        }
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+       twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
 
        return IRQ_HANDLED;
 }
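
Every corrected twl6030_writeb() call in the hunks for this file swaps the last two arguments back into the helper's order, which the fixed calls show as (twl, module, value, register): the old code passed the register offset where the value belongs and vice versa. Purely as an illustration, a thin wrapper whose name encodes the order would make the transposition harder to reintroduce; it assumes nothing beyond the argument order visible in the corrected calls:

/* Assumes twl6030_writeb(twl, module, value, register), as in the fixed calls. */
static inline void twl_usb_reg_write(struct twl6030_usb *twl, u8 reg, u8 val)
{
        twl6030_writeb(twl, TWL_MODULE_USB, val, reg);
}

A call such as twl_usb_reg_write(twl, USB_ID_INT_EN_HI_SET, 0x1) then reads naturally as "write 0x1 to that register".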
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
 {
        struct twl6030_usb *twl = phy_to_twl(x);
 
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+       twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
 
index 3cfabcba7447d2ccaf55d77038b43357d8cbaeec..e7cf84f0751a2a6f1d8ea27614c59abf8a7303fa 100644 (file)
@@ -2,11 +2,11 @@
 # Physical Layer USB driver configuration
 #
 comment "USB Physical Layer drivers"
-       depends on USB
+       depends on USB || USB_GADGET
 
 config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
-       depends on USB
+       depends on USB || USB_GADGET
        depends on I2C
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
index 1b1926200ba791c1335ea9305369258898d955c1..1e71079ce33b7128c116f602d865428297804da7 100644 (file)
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+       { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
        { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
        { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
        { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+       { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+       { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+       { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+       { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+       { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+       { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+       { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+       { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+       { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+       { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
index 8c084ea34e264fe056d7430fc3af008f1e0c86b1..bc912e5a3bebddf3073fe17e015b6195be3708be 100644 (file)
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
index f3c7c78ede33a567d4b8cf7121aaf6fcb33f6567..5661c7e2d4151cbb8f2335c4572f83fc3deccff8 100644 (file)
 #define RTSYSTEMS_VID                  0x2100  /* Vendor ID */
 #define RTSYSTEMS_SERIAL_VX7_PID       0x9e52  /* Serial converter for VX-7 Radios using FT232RL */
 #define RTSYSTEMS_CT29B_PID            0x9e54  /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID            0x9e57  /* USB-RTS01 Radio Cable */
 
 
 /*
index 105a6d898ca4a6595b30f88e09d889b4c5d6d844..9b026bf7afefe7002a4baf106e059d8d63d5b891 100644 (file)
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
 
 static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
 
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
-       {.driver_info = 42},
-       {}
-};
-
 /* All of the device info needed for the Generic Serial Converter */
 struct usb_serial_driver usb_serial_generic_device = {
        .driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
                USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
 
        /* register our generic driver with ourselves */
-       retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+       retval = usb_serial_register_drivers(serial_drivers,
+                       "usbserial_generic", generic_device_ids);
 #endif
        return retval;
 }
index d0ec1aa527199b455e152da7d692c6e126f89a83..a71fa0aa04066dd38758fe35c87cf007a3b55f4a 100644 (file)
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
                        MCT_U232_SET_REQUEST_TYPE,
                        0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
                        WDR_TIMEOUT);
-       if (rc < 0)
-               dev_err(&serial->dev->dev,
-                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+       kfree(buf);
+
        dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
-       kfree(buf);
-       return rc;
+       if (rc < 0) {
+               dev_err(&serial->dev->dev,
+                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+               return rc;
+       }
+       return 0;
 } /* mct_u232_set_modem_ctrl */
 
 static int mct_u232_get_modem_stat(struct usb_serial *serial,
index 81423f7361dbe8f770086c3a03f6aca09b4beb2c..d47eb06fe463b51463cb899659a5f2428d5bd24b 100644 (file)
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
        metro_priv->throttled = 0;
        spin_unlock_irqrestore(&metro_priv->lock, flags);
 
-       /*
-        * Force low_latency on so that our tty_push actually forces the data
-        * through, otherwise it is scheduled, and with high data rates (like
-        * with OHCI) data can get lost.
-        */
-       if (tty)
-               tty->low_latency = 1;
-
        /* Clear the urb pipe. */
        usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
 
index 29160f8b510146054726b68795467d8bf08a2020..57eca244842431fa100f742424c9dbc6310c4156 100644 (file)
 
 static int device_type;
 
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
index 1aae9028cd0bd9e58a75461bbbceb74b9079f0b0..417ab1b0aa30fe4088daf4017b5035b325bb0f3a 100644 (file)
@@ -47,6 +47,7 @@
 /* Function prototypes */
 static int  option_probe(struct usb_serial *serial,
                        const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
 static int option_send_setup(struct usb_serial_port *port);
 static void option_instat_callback(struct urb *urb);
 
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
 #define HUAWEI_PRODUCT_E14AC                   0x14AC
 #define HUAWEI_PRODUCT_K3806                   0x14AE
 #define HUAWEI_PRODUCT_K4605                   0x14C6
+#define HUAWEI_PRODUCT_K5005                   0x14C8
 #define HUAWEI_PRODUCT_K3770                   0x14C9
 #define HUAWEI_PRODUCT_K3771                   0x14CA
 #define HUAWEI_PRODUCT_K4510                   0x14CB
@@ -234,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_G1             0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -425,7 +428,7 @@ static void option_instat_callback(struct urb *urb);
 #define SAMSUNG_VENDOR_ID                       0x04e8
 #define SAMSUNG_PRODUCT_GT_B3730                0x6889
 
-/* YUGA products  www.yuga-info.com*/
+/* YUGA products  www.yuga-info.com gavin.kx@qq.com */
 #define YUGA_VENDOR_ID                         0x257A
 #define YUGA_PRODUCT_CEM600                    0x1601
 #define YUGA_PRODUCT_CEM610                    0x1602
@@ -442,6 +445,8 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CEU516                    0x160C
 #define YUGA_PRODUCT_CEU528                    0x160D
 #define YUGA_PRODUCT_CEU526                    0x160F
+#define YUGA_PRODUCT_CEU881                    0x161F
+#define YUGA_PRODUCT_CEU882                    0x162F
 
 #define YUGA_PRODUCT_CWM600                    0x2601
 #define YUGA_PRODUCT_CWM610                    0x2602
@@ -457,23 +462,26 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CWU518                    0x260B
 #define YUGA_PRODUCT_CWU516                    0x260C
 #define YUGA_PRODUCT_CWU528                    0x260D
+#define YUGA_PRODUCT_CWU581                    0x260E
 #define YUGA_PRODUCT_CWU526                    0x260F
-
-#define YUGA_PRODUCT_CLM600                    0x2601
-#define YUGA_PRODUCT_CLM610                    0x2602
-#define YUGA_PRODUCT_CLM500                    0x2603
-#define YUGA_PRODUCT_CLM510                    0x2604
-#define YUGA_PRODUCT_CLM800                    0x2605
-#define YUGA_PRODUCT_CLM900                    0x2606
-
-#define YUGA_PRODUCT_CLU718                    0x2607
-#define YUGA_PRODUCT_CLU716                    0x2608
-#define YUGA_PRODUCT_CLU728                    0x2609
-#define YUGA_PRODUCT_CLU726                    0x260A
-#define YUGA_PRODUCT_CLU518                    0x260B
-#define YUGA_PRODUCT_CLU516                    0x260C
-#define YUGA_PRODUCT_CLU528                    0x260D
-#define YUGA_PRODUCT_CLU526                    0x260F
+#define YUGA_PRODUCT_CWU582                    0x261F
+#define YUGA_PRODUCT_CWU583                    0x262F
+
+#define YUGA_PRODUCT_CLM600                    0x3601
+#define YUGA_PRODUCT_CLM610                    0x3602
+#define YUGA_PRODUCT_CLM500                    0x3603
+#define YUGA_PRODUCT_CLM510                    0x3604
+#define YUGA_PRODUCT_CLM800                    0x3605
+#define YUGA_PRODUCT_CLM900                    0x3606
+
+#define YUGA_PRODUCT_CLU718                    0x3607
+#define YUGA_PRODUCT_CLU716                    0x3608
+#define YUGA_PRODUCT_CLU728                    0x3609
+#define YUGA_PRODUCT_CLU726                    0x360A
+#define YUGA_PRODUCT_CLU518                    0x360B
+#define YUGA_PRODUCT_CLU516                    0x360C
+#define YUGA_PRODUCT_CLU528                    0x360D
+#define YUGA_PRODUCT_CLU526                    0x360F
 
 /* Viettel products */
 #define VIETTEL_VENDOR_ID                      0x2262
@@ -489,6 +497,19 @@ static void option_instat_callback(struct urb *urb);
 
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID                     0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM               0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM               0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM             0x7101
+#define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_FP_1COM               0x0003
+#define MEDIATEK_PRODUCT_FP_2COM               0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM             0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID                     0x2692
+#define CELLIENT_PRODUCT_MEN200                        0x9005
 
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
@@ -542,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
 
+static const struct option_blacklist_info net_intf2_blacklist = {
+       .reserved = BIT(2),
+};
+
 static const struct option_blacklist_info net_intf3_blacklist = {
        .reserved = BIT(3),
 };
@@ -666,6 +691,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +752,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+       /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1080,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
          0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1209,6 +1243,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
        { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
        { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1255,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1296,7 @@ static struct usb_serial_driver option_1port_device = {
        .ioctl             = usb_wwan_ioctl,
        .attach            = usb_wwan_startup,
        .disconnect        = usb_wwan_disconnect,
-       .release           = usb_wwan_release,
+       .release           = option_release,
        .read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
        .suspend           = usb_wwan_suspend,
@@ -1259,35 +1310,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
 
 static bool debug;
 
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
-       /* Input endpoints and buffer for this port */
-       struct urb *in_urbs[N_IN_URB];
-       u8 *in_buffer[N_IN_URB];
-       /* Output endpoints and buffer for this port */
-       struct urb *out_urbs[N_OUT_URB];
-       u8 *out_buffer[N_OUT_URB];
-       unsigned long out_busy;         /* Bit vector of URBs in use */
-       int opened;
-       struct usb_anchor delayed;
-
-       /* Settings for the port */
-       int rts_state;  /* Handshaking pins (outputs) */
-       int dtr_state;
-       int cts_state;  /* Handshaking pins (inputs) */
-       int dsr_state;
-       int dcd_state;
-       int ri_state;
-
-       unsigned long tx_start_time[N_OUT_URB];
-};
-
 module_usb_serial_driver(serial_drivers, option_ids);
 
 static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1378,22 @@ static int option_probe(struct usb_serial *serial,
        return 0;
 }
 
+static void option_release(struct usb_serial *serial)
+{
+       struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+       usb_wwan_release(serial);
+
+       kfree(priv);
+}
+
 static void option_instat_callback(struct urb *urb)
 {
        int err;
        int status = urb->status;
        struct usb_serial_port *port =  urb->context;
-       struct option_port_private *portdata = usb_get_serial_port_data(port);
+       struct usb_wwan_port_private *portdata =
+                                       usb_get_serial_port_data(port);
 
        dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
 
@@ -1421,7 +1453,7 @@ static int option_send_setup(struct usb_serial_port *port)
        struct usb_serial *serial = port->serial;
        struct usb_wwan_intf_private *intfdata =
                (struct usb_wwan_intf_private *) serial->private;
-       struct option_port_private *portdata;
+       struct usb_wwan_port_private *portdata;
        int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
        int val = 0;
 
index 0d5fe59ebb9e980c9b2180599839f0e71ed0d09b..996015c5f1acd8e9768586bf0c81a68b886f4c07 100644 (file)
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x1199, 0x9010)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9012)},   /* Sierra Wireless Gobi 3000 QDL */
        {USB_DEVICE(0x1199, 0x9013)},   /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {USB_DEVICE(0x1199, 0x9014)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9015)},   /* Sierra Wireless Gobi 3000 Modem device */
+       {USB_DEVICE(0x1199, 0x9018)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9019)},   /* Sierra Wireless Gobi 3000 Modem device */
        {USB_DEVICE(0x12D1, 0x14F0)},   /* Sony Gobi 3000 QDL */
        {USB_DEVICE(0x12D1, 0x14F1)},   /* Sony Gobi 3000 Composite */
        { }                             /* Terminating entry */
index ba54a0a8235c08f71319098cd77ba62833f00301..d423d36acc043f9084951dfb0c9e4b3292f222f7 100644 (file)
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       /* AT&T Direct IP LTE modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
index 6a1b609a0d94cff247460de6716a71ecfadbe707..27483f91a4a38a41bfdd3f2a5c9583be33ac8027 100644 (file)
@@ -659,12 +659,14 @@ exit:
 static struct usb_serial_driver *search_serial_device(
                                        struct usb_interface *iface)
 {
-       const struct usb_device_id *id;
+       const struct usb_device_id *id = NULL;
        struct usb_serial_driver *drv;
+       struct usb_driver *driver = to_usb_driver(iface->dev.driver);
 
        /* Check if the usb id matches a known device */
        list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
-               id = get_iface_id(drv, iface);
+               if (drv->usb_driver == driver)
+                       id = get_iface_id(drv, iface);
                if (id)
                        return drv;
        }
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 
                if (retval) {
                        dbg("sub driver rejected device");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return retval;
                }
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                 */
                if (num_bulk_in == 0 || num_bulk_out == 0) {
                        dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return -ENODEV;
                }
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                if (num_ports == 0) {
                        dev_err(&interface->dev,
                            "Generic device with no bulk out, not allowed.\n");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return -EIO;
                }
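
The three kfree(serial) to usb_serial_put(serial) conversions above encode the same rule: once an object is reference counted, as struct usb_serial evidently is given the existence of usb_serial_put(), error paths must drop their reference rather than free the memory directly, so the release logic runs exactly once and only when the last user is gone. A standalone, userspace sketch of that rule, not the usb-serial implementation:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
        if (--o->refs == 0)
                o->release(o);          /* last reference: run the destructor */
}

static void obj_release(struct obj *o)
{
        printf("released\n");
        free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->refs = 1;
        o->release = obj_release;

        /* error path: drop the reference; never free(o) behind the count */
        obj_put(o);
        return 0;
}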
index a324a5d21e99e5ec7d7cba790e804ce003c064c9..11418da9bc0927492b820383ca433f309897e2fe 100644 (file)
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_NO_READ_CAPACITY_16)
                        sdev->no_read_capacity_16 = 1;
 
+               /*
+                * Many devices do not respond properly to READ_CAPACITY_16.
+                * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                */
+               sdev->try_rc_10_first = 1;
+
                /* assume SPC3 or latter devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
                        us->fflags |= US_FL_SANE_SENSE;
index 94dbd25caa303c44c4c972230fcb2752da619fc0..112156f68afb2117875087447d01222d74b33ba7 100644 (file)
@@ -191,7 +191,9 @@ static int vhost_worker(void *data)
        struct vhost_dev *dev = data;
        struct vhost_work *work = NULL;
        unsigned uninitialized_var(seq);
+       mm_segment_t oldfs = get_fs();
 
+       set_fs(USER_DS);
        use_mm(dev->mm);
 
        for (;;) {
@@ -229,6 +231,7 @@ static int vhost_worker(void *data)
 
        }
        unuse_mm(dev->mm);
+       set_fs(oldfs);
        return 0;
 }
 
index a290be51a1f4a2f412aa80976c85a396eb60db30..0217f7415ef5d6997dd82de449066a2c9643b096 100644 (file)
@@ -2210,7 +2210,7 @@ config FB_XILINX
 
 config FB_COBALT
        tristate "Cobalt server LCD frame buffer support"
-       depends on FB && MIPS_COBALT
+       depends on FB && (MIPS_COBALT || MIPS_SEAD3)
 
 config FB_SH7760
        bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
          and could also have been called by other names when coupled with
          a bridge adapter.
 
+config FB_AUO_K190X
+       tristate "AUO-K190X EPD controller support"
+       depends on FB
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select FB_SYS_FOPS
+       select FB_DEFERRED_IO
+       help
+         Provides support for epaper controllers from the K190X series
+         of AUO. These controllers can be used to drive epaper displays
+         from Sipix.
+
+         This option enables the common support, shared by the individual
+         controller drivers. You will also have to enable the driver
+         for the controller type used in your device.
+
+config FB_AUO_K1900
+       tristate "AUO-K1900 EPD controller support"
+       depends on FB && FB_AUO_K190X
+       help
+         This driver implements support for the AUO K1900 epd-controller.
+         This controller can drive Sipix epaper displays but can only do
+         serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+       tristate "AUO-K1901 EPD controller support"
+       depends on FB && FB_AUO_K190X
+       help
+         This driver implements support for the AUO K1901 epd-controller.
+         This controller can drive Sipix epaper displays and supports
+         concurrent updates, making higher frames per second possible.
+
 config FB_JZ4740
        tristate "JZ4740 LCD framebuffer support"
        depends on FB && MACH_JZ4740
index 9356add945b319ea4bfc9003317306009456d5ee..ee8dafb69e369dd644e6f5354573d00de3463fa8 100644 (file)
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B)      += pmagb-b-fb.o
 obj-$(CONFIG_FB_MAXINE)                  += maxinefb.o
 obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_BROADSHEET)       += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X)       += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900)       += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901)       += auo_k1901fb.o
 obj-$(CONFIG_FB_S1D13XXX)        += s1d13xxxfb.o
 obj-$(CONFIG_FB_SH7760)                  += sh7760fb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644 (file)
index 0000000..c36cf96
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * auok190xfb.c -- FB driver for AUO-K1900 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1900 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2 4 step gray (2bit) - FIXME: add strange refresh
+ * mode3 2 step gray (1bit) - FIXME: add strange refresh
+ * mode4 handwriting mode (strange behaviour)
+ * mode5 automatic selection of update mode
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1900 specific commands
+ */
+
+#define AUOK1900_CMD_PARTIALDISP       0x1001
+#define AUOK1900_CMD_ROTATION          0x1006
+#define AUOK1900_CMD_LUT_STOP          0x1009
+
+#define AUOK1900_INIT_TEMP_AVERAGE     (1 << 13)
+#define AUOK1900_INIT_ROTATE(_x)       ((_x & 0x3) << 10)
+#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
+
+static void auok1900_init(struct auok190xfb_par *par)
+{
+       struct auok190x_board *board = par->board;
+       u16 init_param = 0;
+
+       init_param |= AUOK1900_INIT_TEMP_AVERAGE;
+       init_param |= AUOK1900_INIT_ROTATE(par->rotation);
+       init_param |= AUOK190X_INIT_INVERSE_WHITE;
+       init_param |= AUOK190X_INIT_FORMAT0;
+       init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
+       init_param |= AUOK190X_INIT_SHIFT_RIGHT;
+
+       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+       /* let the controller finish */
+       board->wait_for_rdy(par);
+}
+
+static void auok1900_update_region(struct auok190xfb_par *par, int mode,
+                                               u16 y1, u16 y2)
+{
+       struct device *dev = par->info->device;
+       unsigned char *buf = (unsigned char *)par->info->screen_base;
+       int xres = par->info->var.xres;
+       u16 args[4];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       /* y1 and y2 must each be a multiple of 2, so drop the lowest bit */
+       y1 &= 0xfffe;
+       y2 &= 0xfffe;
+
+       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+               1, y1+1, xres, y2-y1, mode);
+
+       /* FIXME: handle different partial update modes */
+       args[0] = mode | 1;
+       args[1] = y1 + 1;
+       args[2] = xres;
+       args[3] = y2 - y1;
+       buf += y1 * xres;
+       auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
+                                    ((y2 - y1) * xres)/2, (u16 *) buf);
+       auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
+
+       par->update_cnt++;
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
+                                               u16 y1, u16 y2)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(1);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1900_update_region(par, mode, y1, y2);
+}
+
+static void auok1900fb_dpy_update(struct auok190xfb_par *par)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(0);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1900_update_region(par, mode, 0, par->info->var.yres);
+       par->update_cnt = 0;
+}
+
+static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
+{
+       return (par->update_cnt > 10);
+}
+
+static int __devinit auok1900fb_probe(struct platform_device *pdev)
+{
+       struct auok190x_init_data init;
+       struct auok190x_board *board;
+
+       /* pick up board specific routines */
+       board = pdev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* fill temporary init struct for common init */
+       init.id = "auo_k1900fb";
+       init.board = board;
+       init.update_partial = auok1900fb_dpy_update_pages;
+       init.update_all = auok1900fb_dpy_update;
+       init.need_refresh = auok1900fb_need_refresh;
+       init.init = auok1900_init;
+
+       return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1900fb_remove(struct platform_device *pdev)
+{
+       return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1900fb_driver = {
+       .probe  = auok1900fb_probe,
+       .remove = __devexit_p(auok1900fb_remove),
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "auo_k1900fb",
+               .pm = &auok190x_pm,
+       },
+};
+module_platform_driver(auok1900fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
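The header comment above states that a board specific driver has to perform all physical IO. A minimal sketch of such board glue is shown below; all myboard_* identifiers and GPIO numbers are hypothetical, while the callback and field names follow the auok190x_board usage in the probe code of this commit.

    #include <linux/platform_device.h>
    #include <video/auo_k190xfb.h>

    static struct auok190x_board myboard_k1900_board = {
            /* physical-IO callbacks implemented by the board code */
            .init           = myboard_auo_init,
            .cleanup        = myboard_auo_cleanup,
            .wait_for_rdy   = myboard_auo_wait_for_rdy,
            .set_ctl        = myboard_auo_set_ctl,
            .set_hdb        = myboard_auo_set_hdb,
            .get_hdb        = myboard_auo_get_hdb,
            .setup_irq      = myboard_auo_setup_irq,
            /* board wiring and panel description */
            .gpio_nsleep    = MYBOARD_GPIO_EPD_NSLEEP,
            .gpio_nrst      = MYBOARD_GPIO_EPD_NRST,
            .resolution     = AUOK190X_RESOLUTION_800_600,
            .rotation       = 0,
            .fps            = 4,
    };

    static struct platform_device myboard_k1900_device = {
            .name   = "auo_k1900fb",        /* must match the driver name */
            .id     = -1,
            .dev    = {
                    .platform_data  = &myboard_k1900_board,
            },
    };

Registering myboard_k1900_device (e.g. via platform_device_register) from the board init code is what lets auok1900fb_probe() find the board data through pdev->dev.platform_data.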
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644 (file)
index 0000000..1c054c1
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * auo_k1901fb.c -- FB driver for AUO-K1901 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1901 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2+3 4 step gray (2bit)
+ * mode4+5 2 step gray (1bit)
+ * - mode4 is described as "without LUT"
+ * mode7 automatic selection of update mode
+ *
+ * The most interesting difference from the K1900 is the ability to do screen
+ * updates in an asynchronous fashion: where the K1900 needs to wait for the
+ * current update to complete, the K1901 can already accept subsequent updates.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1901 specific commands
+ */
+
+#define AUOK1901_CMD_LUT_INTERFACE     0x0005
+#define AUOK1901_CMD_DMA_START         0x1001
+#define AUOK1901_CMD_CURSOR_START      0x1007
+#define AUOK1901_CMD_CURSOR_STOP       AUOK190X_CMD_DATA_STOP
+#define AUOK1901_CMD_DDMA_START                0x1009
+
+#define AUOK1901_INIT_GATE_PULSE_LOW   (0 << 14)
+#define AUOK1901_INIT_GATE_PULSE_HIGH  (1 << 14)
+#define AUOK1901_INIT_SINGLE_GATE      (0 << 13)
+#define AUOK1901_INIT_DOUBLE_GATE      (1 << 13)
+
+/* Bits to pixels
+ *   Mode      15-12   11-8    7-4     3-0
+ *   format2   2       T       1       T
+ *   format3   1       T       2       T
+ *   format4   T       2       T       1
+ *   format5   T       1       T       2
+ *
+ *   halftone modes:
+ *   format6   2       2       1       1
+ *   format7   1       1       2       2
+ */
+#define AUOK1901_INIT_FORMAT2          (1 << 7)
+#define AUOK1901_INIT_FORMAT3          ((1 << 7) | (1 << 6))
+#define AUOK1901_INIT_FORMAT4          (1 << 8)
+#define AUOK1901_INIT_FORMAT5          ((1 << 8) | (1 << 6))
+#define AUOK1901_INIT_FORMAT6          ((1 << 8) | (1 << 7))
+#define AUOK1901_INIT_FORMAT7          ((1 << 8) | (1 << 7) | (1 << 6))
+
+/* res[4] to bit 10
+ * res[3-0] to bits 5-2
+ */
+#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
+                                        | ((_res & 0xf) << 2))
+
+/*
+ * portrait / landscape orientation in AUOK1901_CMD_DMA_START
+ */
+#define AUOK1901_DMA_ROTATE90(_rot)            ((_rot & 1) << 13)
+
+/*
+ * equivalent to 1 << 11, needs the ~ to get the same rotation as the K1900
+ */
+#define AUOK1901_DDMA_ROTATE180(_rot)          ((~_rot & 2) << 10)
+
+static void auok1901_init(struct auok190xfb_par *par)
+{
+       struct auok190x_board *board = par->board;
+       u16 init_param = 0;
+
+       init_param |= AUOK190X_INIT_INVERSE_WHITE;
+       init_param |= AUOK190X_INIT_FORMAT0;
+       init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
+       init_param |= AUOK190X_INIT_SHIFT_LEFT;
+
+       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+       /* let the controller finish */
+       board->wait_for_rdy(par);
+}
+
+static void auok1901_update_region(struct auok190xfb_par *par, int mode,
+                                               u16 y1, u16 y2)
+{
+       struct device *dev = par->info->device;
+       unsigned char *buf = (unsigned char *)par->info->screen_base;
+       int xres = par->info->var.xres;
+       u16 args[5];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       /* y1 and y2 must each be a multiple of 2, so drop the lowest bit */
+       y1 &= 0xfffe;
+       y2 &= 0xfffe;
+
+       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+               1, y1+1, xres, y2-y1, mode);
+
+       /* K1901: first, transfer the region data */
+       args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
+       args[1] = y1 + 1;
+       args[2] = xres;
+       args[3] = y2 - y1;
+       buf += y1 * xres;
+       auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
+                                           args, ((y2 - y1) * xres)/2,
+                                           (u16 *) buf);
+       auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
+
+       /* K1901: second, tell the controller to update the region with mode */
+       args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
+       args[1] = 1;
+       args[2] = y1 + 1;
+       args[3] = xres;
+       args[4] = y2 - y1;
+       auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
+
+       par->update_cnt++;
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
+                                               u16 y1, u16 y2)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(1);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1901_update_region(par, mode, y1, y2);
+}
+
+static void auok1901fb_dpy_update(struct auok190xfb_par *par)
+{
+       int mode;
+
+       /* When doing full updates, wait for the controller to be ready.
+        * This will hopefully catch some hangs of the K1901.
+        */
+       par->board->wait_for_rdy(par);
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(0);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1901_update_region(par, mode, 0, par->info->var.yres);
+       par->update_cnt = 0;
+}
+
+static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
+{
+       return (par->update_cnt > 10);
+}
+
+static int __devinit auok1901fb_probe(struct platform_device *pdev)
+{
+       struct auok190x_init_data init;
+       struct auok190x_board *board;
+
+       /* pick up board specific routines */
+       board = pdev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* fill temporary init struct for common init */
+       init.id = "auo_k1901fb";
+       init.board = board;
+       init.update_partial = auok1901fb_dpy_update_pages;
+       init.update_all = auok1901fb_dpy_update;
+       init.need_refresh = auok1901fb_need_refresh;
+       init.init = auok1901_init;
+
+       return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1901fb_remove(struct platform_device *pdev)
+{
+       return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1901fb_driver = {
+       .probe  = auok1901fb_probe,
+       .remove = __devexit_p(auok1901fb_remove),
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "auo_k1901fb",
+               .pm = &auok190x_pm,
+       },
+};
+module_platform_driver(auok1901fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644 (file)
index 0000000..77da6a2
--- /dev/null
@@ -0,0 +1,1046 @@
+/*
+ * Common code for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+struct panel_info {
+       int w;
+       int h;
+};
+
+/* table of panel specific parameters to be indexed into by the board drivers */
+static struct panel_info panel_table[] = {
+       /* standard 6" */
+       [AUOK190X_RESOLUTION_800_600] = {
+               .w = 800,
+               .h = 600,
+       },
+       /* standard 9" */
+       [AUOK190X_RESOLUTION_1024_768] = {
+               .w = 1024,
+               .h = 768,
+       },
+};
+
+/*
+ * private I80 interface to the board driver
+ */
+
+static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+       par->board->set_hdb(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+}
+
+static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_DC, 0);
+       auok190x_issue_data(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_DC, 1);
+}
+
+static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
+                                u16 *data)
+{
+       struct device *dev = par->info->device;
+       int i;
+       u16 tmp;
+
+       if (size & 3) {
+               dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
+                       size);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < (size >> 1); i++) {
+               par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+
+               /* simple reduction of 8bit staticgray to 4bit gray
+                * combines 4 * 4bit pixel values into a 16bit value
+                */
+               tmp  = (data[2*i] & 0xF0) >> 4;
+               tmp |= (data[2*i] & 0xF000) >> 8;
+               tmp |= (data[2*i+1] & 0xF0) << 4;
+               tmp |= (data[2*i+1] & 0xF000);
+
+               par->board->set_hdb(par, tmp);
+               par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+       }
+
+       return 0;
+}
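/*
 * Worked example for the nibble packing above, assuming a little-endian
 * CPU: four consecutive 8-bit framebuffer pixels 0x12 0x34 0x56 0x78
 * are read as data[2*i] = 0x3412 and data[2*i+1] = 0x7856. Only the
 * top nibble of each pixel is kept, so the 16-bit word driven onto the
 * bus is 0x7531, with the first pixel in the least significant nibble.
 */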
+
+static u16 auok190x_read_data(struct auok190xfb_par *par)
+{
+       u16 data;
+
+       par->board->set_ctl(par, AUOK190X_I80_OE, 0);
+       data = par->board->get_hdb(par);
+       par->board->set_ctl(par, AUOK190X_I80_OE, 1);
+
+       return data;
+}
+
+/*
+ * Command interface for the controller drivers
+ */
+
+void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
+
+void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv)
+{
+       int i;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               auok190x_issue_data(par, argv[i]);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
+
+int auok190x_send_command(struct auok190xfb_par *par, u16 data)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_command_nowait(par, data);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command);
+
+int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                          int argc, u16 *argv)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
+
+int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                          int argc, u16 *argv)
+{
+       int i, ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               argv[i] = auok190x_read_data(par);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
+
+void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv, int size, u16 *data)
+{
+       int i;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               auok190x_issue_data(par, argv[i]);
+
+       auok190x_issue_pixels(par, size, data);
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
+
+int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv, int size, u16 *data)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
+
+/*
+ * fbdefio callbacks - common on both controllers.
+ */
+
+static void auok190xfb_dpy_first_io(struct fb_info *info)
+{
+       /* tell runtime-pm that we wish to use the device in a short time */
+       pm_runtime_get(info->device);
+}
+
+/* this is called back from the deferred io workqueue */
+static void auok190xfb_dpy_deferred_io(struct fb_info *info,
+                               struct list_head *pagelist)
+{
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct auok190xfb_par *par = info->par;
+       u16 yres = info->var.yres;
+       u16 xres = info->var.xres;
+       u16 y1 = 0, h = 0;
+       int prev_index = -1;
+       struct page *cur;
+       int h_inc;
+       int threshold;
+
+       if (!list_empty(pagelist))
+               /* the device resume should've been requested through first_io;
+                * if the resume has not finished by now, wait for it.
+                */
+               pm_runtime_barrier(info->device);
+       else
+               /* We reached this via the fsync or some other way.
+                * In either case the first_io function did not run,
+                * so we runtime_resume the device here synchronously.
+                */
+               pm_runtime_get_sync(info->device);
+
+       /* Do a full screen update every n updates to prevent
+        * excessive darkening of the Sipix display.
+        * If we do this, there is no need to walk the pages.
+        */
+       if (par->need_refresh(par)) {
+               par->update_all(par);
+               goto out;
+       }
+
+       /* height increment is fixed per page */
+       h_inc = DIV_ROUND_UP(PAGE_SIZE, xres);
+
+       /* calculate number of pages from pixel height */
+       threshold = par->consecutive_threshold / h_inc;
+       if (threshold < 1)
+               threshold = 1;
+
+       /* walk the written page list and swizzle the data */
+       list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+               if (prev_index < 0) {
+                       /* just starting so assign first page */
+                       y1 = (cur->index << PAGE_SHIFT) / xres;
+                       h = h_inc;
+               } else if ((cur->index - prev_index) <= threshold) {
+                       /* page is within our threshold for single updates */
+                       h += h_inc * (cur->index - prev_index);
+               } else {
+                       /* page not consecutive, issue previous update first */
+                       par->update_partial(par, y1, y1 + h);
+
+                       /* start over with our non-consecutive page */
+                       y1 = (cur->index << PAGE_SHIFT) / xres;
+                       h = h_inc;
+               }
+               prev_index = cur->index;
+       }
+
+       /* if we still have any pages to update we do so now */
+       if (h >= yres)
+               /* it's a full screen update, just do it */
+               par->update_all(par);
+       else
+               par->update_partial(par, y1, min((u16) (y1 + h), yres));
+
+out:
+       pm_runtime_mark_last_busy(info->device);
+       pm_runtime_put_autosuspend(info->device);
+}
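/*
 * Worked example for the page-walk arithmetic above, assuming
 * PAGE_SIZE = 4096 and the standard 800x600 panel (8 bits per pixel,
 * so one display line is 800 bytes): h_inc = DIV_ROUND_UP(4096, 800)
 * = 6 lines per dirty page, and with consecutive_threshold = 100 the
 * page threshold becomes 100 / 6 = 16, i.e. dirty pages whose indices
 * differ by at most 16 are merged into a single partial update.
 */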
+
+/*
+ * framebuffer operations
+ */
+
+/*
+ * This is the slow path from userspace: processes can seek and write to
+ * the fb, and it's inefficient to do anything less than a full screen draw.
+ */
+static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct auok190xfb_par *par = info->par;
+       unsigned long p = *ppos;
+       void *dst;
+       int err = 0;
+       unsigned long total_size;
+
+       if (info->state != FBINFO_STATE_RUNNING)
+               return -EPERM;
+
+       total_size = info->fix.smem_len;
+
+       if (p > total_size)
+               return -EFBIG;
+
+       if (count > total_size) {
+               err = -EFBIG;
+               count = total_size;
+       }
+
+       if (count + p > total_size) {
+               if (!err)
+                       err = -ENOSPC;
+
+               count = total_size - p;
+       }
+
+       dst = (void *)(info->screen_base + p);
+
+       if (copy_from_user(dst, buf, count))
+               err = -EFAULT;
+
+       if  (!err)
+               *ppos += count;
+
+       par->update_all(par);
+
+       return (err) ? err : count;
+}
+
+static void auok190xfb_fillrect(struct fb_info *info,
+                                  const struct fb_fillrect *rect)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_fillrect(info, rect);
+
+       par->update_all(par);
+}
+
+static void auok190xfb_copyarea(struct fb_info *info,
+                                  const struct fb_copyarea *area)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_copyarea(info, area);
+
+       par->update_all(par);
+}
+
+static void auok190xfb_imageblit(struct fb_info *info,
+                               const struct fb_image *image)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_imageblit(info, image);
+
+       par->update_all(par);
+}
+
+static int auok190xfb_check_var(struct fb_var_screeninfo *var,
+                                  struct fb_info *info)
+{
+       if (info->var.xres != var->xres || info->var.yres != var->yres ||
+           info->var.xres_virtual != var->xres_virtual ||
+           info->var.yres_virtual != var->yres_virtual) {
+               pr_info("%s: Resolution not supported: X%u x Y%u\n",
+                        __func__, var->xres, var->yres);
+               return -EINVAL;
+       }
+
+       /*
+        *  Memory limit
+        */
+
+       if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+               pr_info("%s: Memory Limit requested yres_virtual = %u\n",
+                        __func__, var->yres_virtual);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static struct fb_ops auok190xfb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_read        = fb_sys_read,
+       .fb_write       = auok190xfb_write,
+       .fb_fillrect    = auok190xfb_fillrect,
+       .fb_copyarea    = auok190xfb_copyarea,
+       .fb_imageblit   = auok190xfb_imageblit,
+       .fb_check_var   = auok190xfb_check_var,
+};
+
+/*
+ * Controller-functions common to both K1900 and K1901
+ */
+
+static int auok190x_read_temperature(struct auok190xfb_par *par)
+{
+       struct device *dev = par->info->device;
+       u16 data[4];
+       int temp;
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       /* sanitize and split off half-degrees for now */
+       temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
+
+       /* handle positive and negative temperatures */
+       if (temp >= 201)
+               return (255 - temp + 1) * (-1);
+       else
+               return temp;
+}
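/*
 * Worked example for the temperature decoding above: the 9-bit raw
 * value is halved to drop the half-degree bit, giving a 0..255 range
 * in which readings of 201 and above are interpreted as negative,
 * i.e. temp - 256. A raw value of 0x1f0 therefore gives
 * (0x1f0 & 0x1ff) >> 1 = 248, and since 248 >= 201 the function
 * returns -(255 - 248 + 1) = -8 degrees.
 */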
+
+static void auok190x_identify(struct auok190xfb_par *par)
+{
+       struct device *dev = par->info->device;
+       u16 data[4];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+       mutex_unlock(&(par->io_lock));
+
+       par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
+
+       par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
+       par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
+       par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
+
+       par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
+       par->lut_version = AUOK190X_VERSION_LUT(data[3]);
+
+       dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
+               par->panel_size_int, par->panel_size_float, par->panel_model,
+               par->epd_type, par->tcon_version, par->lut_version);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+/*
+ * Sysfs functions
+ */
+
+static ssize_t update_mode_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+
+       return sprintf(buf, "%d\n", par->update_mode);
+}
+
+static ssize_t update_mode_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int mode, ret;
+
+       ret = kstrtoint(buf, 10, &mode);
+       if (ret)
+               return ret;
+
+       par->update_mode = mode;
+
+       /* if we enter a better mode, do a full update */
+       if (par->last_mode > 1 && mode < par->last_mode)
+               par->update_all(par);
+
+       return count;
+}
+
+static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+
+       return sprintf(buf, "%d\n", par->flash);
+}
+
+static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int flash, ret;
+
+       ret = kstrtoint(buf, 10, &flash);
+       if (ret)
+               return ret;
+
+       if (flash > 0)
+               par->flash = 1;
+       else
+               par->flash = 0;
+
+       return count;
+}
+
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int temp;
+
+       temp = auok190x_read_temperature(par);
+       return sprintf(buf, "%d\n", temp);
+}
+
+static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
+static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR(temp, 0644, temp_show, NULL);
+
+static struct attribute *auok190x_attributes[] = {
+       &dev_attr_update_mode.attr,
+       &dev_attr_flash.attr,
+       &dev_attr_temp.attr,
+       NULL
+};
+
+static const struct attribute_group auok190x_attr_group = {
+       .attrs          = auok190x_attributes,
+};
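/*
 * These attributes are registered on the fb's parent device in
 * auok190x_common_probe() below, so they typically show up in the
 * platform device's sysfs directory (the exact path depends on how the
 * board registers the device, e.g. /sys/devices/platform/auo_k1900fb/):
 * update_mode selects the controller update mode (a negative value
 * restores the driver default), flash controls whether the
 * AUOK190X_UPDATE_NONFLASH bit is set on subsequent updates, and temp
 * reads back the current controller temperature.
 */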
+
+static int auok190x_power(struct auok190xfb_par *par, bool on)
+{
+       struct auok190x_board *board = par->board;
+       int ret;
+
+       if (on) {
+               /* We should maintain POWER up for at least 80ms before setting
+                * RST_N and SLP_N high (TCON spec 20100803_v35 p59)
+                */
+               ret = regulator_enable(par->regulator);
+               if (ret)
+                       return ret;
+
+               msleep(200);
+               gpio_set_value(board->gpio_nrst, 1);
+               gpio_set_value(board->gpio_nsleep, 1);
+               msleep(200);
+       } else {
+               regulator_disable(par->regulator);
+               gpio_set_value(board->gpio_nrst, 0);
+               gpio_set_value(board->gpio_nsleep, 0);
+       }
+
+       return 0;
+}
+
+/*
+ * Recovery - powercycle the controller
+ */
+
+static void auok190x_recover(struct auok190xfb_par *par)
+{
+       auok190x_power(par, 0);
+       msleep(100);
+       auok190x_power(par, 1);
+
+       par->init(par);
+
+       /* wait for init to complete */
+       par->board->wait_for_rdy(par);
+}
+
+/*
+ * Power-management
+ */
+
+#ifdef CONFIG_PM
+static int auok190x_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+       u16 standby_param;
+
+       /* take and keep the lock until we are resumed, as the controller
+        * will never reach the non-busy state when in standby mode
+        */
+       mutex_lock(&(par->io_lock));
+
+       if (par->standby) {
+               dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
+               mutex_unlock(&(par->io_lock));
+               return 0;
+       }
+
+       /* according to runtime_pm.txt, runtime_suspend only means that the
+        * device will not process data and will not communicate with the CPU.
+        * As we hold the lock, this stays true even without standby.
+        */
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "runtime suspend without standby\n");
+               goto finish;
+       } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
+               /* for some TCON versions STANDBY expects a parameter (0), but
+                * it seems the real tcon version has yet to be determined.
+                */
+               dev_dbg(dev, "runtime suspend with additional empty param\n");
+               standby_param = 0;
+               auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
+                                     &standby_param);
+       } else {
+               dev_dbg(dev, "runtime suspend without param\n");
+               auok190x_send_command(par, AUOK190X_CMD_STANDBY);
+       }
+
+       msleep(64);
+
+finish:
+       par->standby = 1;
+
+       return 0;
+}
+
+static int auok190x_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       if (!par->standby) {
+               dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
+               return 0;
+       }
+
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "runtime resume without standby\n");
+       } else {
+               /* when in standby, controller is always busy
+                * and only accepts the wakeup command
+                */
+               dev_dbg(dev, "runtime resume from standby\n");
+               auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
+
+               msleep(160);
+
+               /* wait for the controller to be ready and release the lock */
+               board->wait_for_rdy(par);
+       }
+
+       par->standby = 0;
+
+       mutex_unlock(&(par->io_lock));
+
+       return 0;
+}
+
+static int auok190x_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+       int ret;
+
+       dev_dbg(dev, "suspend\n");
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               /* suspend via powering off the ic */
+               dev_dbg(dev, "suspend with broken standby\n");
+
+               auok190x_power(par, 0);
+       } else {
+               dev_dbg(dev, "suspend using sleep\n");
+
+               /* the sleep state can only be entered from the standby state.
+                * pm_runtime_get_noresume gets called before the suspend call.
+                * So the device's usage count is >0 but it is not necessarily
+                * active.
+                */
+               if (!pm_runtime_status_suspended(dev)) {
+                       ret = auok190x_runtime_suspend(dev);
+                       if (ret < 0) {
+                               dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
+                                       ret);
+                               return ret;
+                       }
+                       par->manual_standby = 1;
+               }
+
+               gpio_direction_output(board->gpio_nsleep, 0);
+       }
+
+       msleep(100);
+
+       return 0;
+}
+
+static int auok190x_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       dev_dbg(dev, "resume\n");
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "resume with broken standby\n");
+
+               auok190x_power(par, 1);
+
+               par->init(par);
+       } else {
+               dev_dbg(dev, "resume from sleep\n");
+
+               /* device should be in runtime suspend when we were suspended
+                * and pm_runtime_put_sync gets called after this function.
+                * So there is no need to touch the standby mode here at all.
+                */
+               gpio_direction_output(board->gpio_nsleep, 1);
+               msleep(100);
+
+               /* an additional init call seems to be necessary after sleep */
+               auok190x_runtime_resume(dev);
+               par->init(par);
+
+               /* if we were runtime-suspended before, suspend again */
+               if (!par->manual_standby)
+                       auok190x_runtime_suspend(dev);
+               else
+                       par->manual_standby = 0;
+       }
+
+       return 0;
+}
+#endif
+
+const struct dev_pm_ops auok190x_pm = {
+       SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+                          NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
+};
+EXPORT_SYMBOL_GPL(auok190x_pm);
+
+/*
+ * Common probe and remove code
+ */
+
+int __devinit auok190x_common_probe(struct platform_device *pdev,
+                                   struct auok190x_init_data *init)
+{
+       struct auok190x_board *board = init->board;
+       struct auok190xfb_par *par;
+       struct fb_info *info;
+       struct panel_info *panel;
+       int videomemorysize, ret;
+       unsigned char *videomemory;
+
+       /* check board contents */
+       if (!board->init || !board->cleanup || !board->wait_for_rdy
+           || !board->set_ctl || !board->set_hdb || !board->get_hdb
+           || !board->setup_irq)
+               return -EINVAL;
+
+       info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
+       if (!info)
+               return -ENOMEM;
+
+       par = info->par;
+       par->info = info;
+       par->board = board;
+       par->recover = auok190x_recover;
+       par->update_partial = init->update_partial;
+       par->update_all = init->update_all;
+       par->need_refresh = init->need_refresh;
+       par->init = init->init;
+
+       /* init update modes */
+       par->update_cnt = 0;
+       par->update_mode = -1;
+       par->last_mode = -1;
+       par->flash = 0;
+
+       par->regulator = regulator_get(info->device, "vdd");
+       if (IS_ERR(par->regulator)) {
+               ret = PTR_ERR(par->regulator);
+               dev_err(info->device, "Failed to get regulator: %d\n", ret);
+               goto err_reg;
+       }
+
+       ret = board->init(par);
+       if (ret) {
+               dev_err(info->device, "board init failed, %d\n", ret);
+               goto err_board;
+       }
+
+       ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
+       if (ret) {
+               dev_err(info->device, "could not request sleep gpio, %d\n",
+                       ret);
+               goto err_gpio1;
+       }
+
+       ret = gpio_direction_output(board->gpio_nsleep, 0);
+       if (ret) {
+               dev_err(info->device, "could not set sleep gpio, %d\n", ret);
+               goto err_gpio2;
+       }
+
+       ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
+       if (ret) {
+               dev_err(info->device, "could not request reset gpio, %d\n",
+                       ret);
+               goto err_gpio2;
+       }
+
+       ret = gpio_direction_output(board->gpio_nrst, 0);
+       if (ret) {
+               dev_err(info->device, "could not set reset gpio, %d\n", ret);
+               goto err_gpio3;
+       }
+
+       ret = auok190x_power(par, 1);
+       if (ret) {
+               dev_err(info->device, "could not power on the device, %d\n",
+                       ret);
+               goto err_gpio3;
+       }
+
+       mutex_init(&par->io_lock);
+
+       init_waitqueue_head(&par->waitq);
+
+       ret = par->board->setup_irq(par->info);
+       if (ret) {
+               dev_err(info->device, "could not setup ready-irq, %d\n", ret);
+               goto err_irq;
+       }
+
+       /* wait for init to complete */
+       par->board->wait_for_rdy(par);
+
+       /*
+        * From here on the controller can talk to us
+        */
+
+       /* initialise fix, var, resolution and rotation */
+
+       strlcpy(info->fix.id, init->id, 16);
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+       info->fix.xpanstep = 0;
+       info->fix.ypanstep = 0;
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_NONE;
+
+       info->var.bits_per_pixel = 8;
+       info->var.grayscale = 1;
+       info->var.red.length = 8;
+       info->var.green.length = 8;
+       info->var.blue.length = 8;
+
+       panel = &panel_table[board->resolution];
+
+       /* if 90 degree rotation, switch width and height */
+       if (board->rotation & 1) {
+               info->var.xres = panel->h;
+               info->var.yres = panel->w;
+               info->var.xres_virtual = panel->h;
+               info->var.yres_virtual = panel->w;
+               info->fix.line_length = panel->h;
+       } else {
+               info->var.xres = panel->w;
+               info->var.yres = panel->h;
+               info->var.xres_virtual = panel->w;
+               info->var.yres_virtual = panel->h;
+               info->fix.line_length = panel->w;
+       }
+
+       par->resolution = board->resolution;
+       par->rotation = board->rotation;
+
+       /* videomemory handling */
+
+       videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
+       videomemory = vmalloc(videomemorysize);
+       if (!videomemory) {
+               ret = -ENOMEM;
+               goto err_irq;
+       }
+
+       memset(videomemory, 0, videomemorysize);
+       info->screen_base = (char *)videomemory;
+       info->fix.smem_len = videomemorysize;
+
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->fbops = &auok190xfb_ops;
+
+       /* deferred io init */
+
+       info->fbdefio = devm_kzalloc(info->device,
+                                    sizeof(struct fb_deferred_io),
+                                    GFP_KERNEL);
+       if (!info->fbdefio) {
+               dev_err(info->device, "Failed to allocate memory\n");
+               ret = -ENOMEM;
+               goto err_defio;
+       }
+
+       dev_dbg(info->device, "targeting %d frames per second\n", board->fps);
+       info->fbdefio->delay = HZ / board->fps;
+       info->fbdefio->first_io = auok190xfb_dpy_first_io;
+       info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io;
+       fb_deferred_io_init(info);
+
+       /* color map */
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret < 0) {
+               dev_err(info->device, "Failed to allocate colormap\n");
+               goto err_cmap;
+       }
+
+       /* controller init */
+
+       par->consecutive_threshold = 100;
+       par->init(par);
+       auok190x_identify(par);
+
+       platform_set_drvdata(pdev, info);
+
+       ret = register_framebuffer(info);
+       if (ret < 0)
+               goto err_regfb;
+
+       ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
+       if (ret)
+               goto err_sysfs;
+
+       dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
+                info->node, info->var.xres, info->var.yres,
+                videomemorysize >> 10);
+
+       /* increase autosuspend_delay when we use alternative methods
+        * for runtime_pm
+        */
+       par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
+                                       ? 1000 : 200;
+
+       pm_runtime_set_active(info->device);
+       pm_runtime_enable(info->device);
+       pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
+       pm_runtime_use_autosuspend(info->device);
+
+       return 0;
+
+err_sysfs:
+       unregister_framebuffer(info);
+err_regfb:
+       fb_dealloc_cmap(&info->cmap);
+err_cmap:
+       fb_deferred_io_cleanup(info);
+       kfree(info->fbdefio);
+err_defio:
+       vfree((void *)info->screen_base);
+err_irq:
+       auok190x_power(par, 0);
+err_gpio3:
+       gpio_free(board->gpio_nrst);
+err_gpio2:
+       gpio_free(board->gpio_nsleep);
+err_gpio1:
+       board->cleanup(par);
+err_board:
+       regulator_put(par->regulator);
+err_reg:
+       framebuffer_release(info);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_probe);
+
+int  __devexit auok190x_common_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       pm_runtime_disable(info->device);
+
+       sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
+
+       unregister_framebuffer(info);
+
+       fb_dealloc_cmap(&info->cmap);
+
+       fb_deferred_io_cleanup(info);
+       kfree(info->fbdefio);
+
+       vfree((void *)info->screen_base);
+
+       auok190x_power(par, 0);
+
+       gpio_free(board->gpio_nrst);
+       gpio_free(board->gpio_nsleep);
+
+       board->cleanup(par);
+
+       regulator_put(par->regulator);
+
+       framebuffer_release(info);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_remove);
+
+MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644 (file)
index 0000000..e35af1f
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Private common definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * I80 interface specific defines
+ */
+
+#define AUOK190X_I80_CS                        0x01
+#define AUOK190X_I80_DC                        0x02
+#define AUOK190X_I80_WR                        0x03
+#define AUOK190X_I80_OE                        0x04
+
+/*
+ * AUOK190x commands, common to both controllers
+ */
+
+#define AUOK190X_CMD_INIT              0x0000
+#define AUOK190X_CMD_STANDBY           0x0001
+#define AUOK190X_CMD_WAKEUP            0x0002
+#define AUOK190X_CMD_TCON_RESET                0x0003
+#define AUOK190X_CMD_DATA_STOP         0x1002
+#define AUOK190X_CMD_LUT_START         0x1003
+#define AUOK190X_CMD_DISP_REFRESH      0x1004
+#define AUOK190X_CMD_DISP_RESET                0x1005
+#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
+#define AUOK190X_CMD_PRE_DISPLAY_STOP  0x100F
+#define AUOK190X_CMD_FLASH_W           0x2000
+#define AUOK190X_CMD_FLASH_E           0x2001
+#define AUOK190X_CMD_FLASH_STS         0x2002
+#define AUOK190X_CMD_FRAMERATE         0x3000
+#define AUOK190X_CMD_READ_VERSION      0x4000
+#define AUOK190X_CMD_READ_STATUS       0x4001
+#define AUOK190X_CMD_READ_LUT          0x4003
+#define AUOK190X_CMD_DRIVERTIMING      0x5000
+#define AUOK190X_CMD_LBALANCE          0x5001
+#define AUOK190X_CMD_AGINGMODE         0x6000
+#define AUOK190X_CMD_AGINGEXIT         0x6001
+
+/*
+ * Common settings for AUOK190X_CMD_INIT
+ */
+
+#define AUOK190X_INIT_DATA_FILTER      (0 << 12)
+#define AUOK190X_INIT_DATA_BYPASS      (1 << 12)
+#define AUOK190X_INIT_INVERSE_WHITE    (0 << 9)
+#define AUOK190X_INIT_INVERSE_BLACK    (1 << 9)
+#define AUOK190X_INIT_SCAN_DOWN                (0 << 1)
+#define AUOK190X_INIT_SCAN_UP          (1 << 1)
+#define AUOK190X_INIT_SHIFT_LEFT       (0 << 0)
+#define AUOK190X_INIT_SHIFT_RIGHT      (1 << 0)
+
+/* Common bits to pixels
+ *   Mode      15-12   11-8    7-4     3-0
+ *   format0   4       3       2       1
+ *   format1   3       4       1       2
+ */
+
+#define AUOK190X_INIT_FORMAT0          0
+#define AUOK190X_INIT_FORMAT1          (1 << 6)
+
+/*
+ * settings for AUOK190X_CMD_RESET
+ */
+
+#define AUOK190X_RESET_TCON            (0 << 0)
+#define AUOK190X_RESET_NORMAL          (1 << 0)
+#define AUOK190X_RESET_PON             (1 << 1)
+
+/*
+ * AUOK190X_CMD_VERSION
+ */
+
+#define AUOK190X_VERSION_TEMP_MASK             (0x1ff)
+#define AUOK190X_VERSION_EPD_MASK              (0xff)
+#define AUOK190X_VERSION_SIZE_INT(_val)                ((_val & 0xfc00) >> 10)
+#define AUOK190X_VERSION_SIZE_FLOAT(_val)      ((_val & 0x3c0) >> 6)
+#define AUOK190X_VERSION_MODEL(_val)           (_val & 0x3f)
+#define AUOK190X_VERSION_LUT(_val)             (_val & 0xff)
+#define AUOK190X_VERSION_TCON(_val)            ((_val & 0xff00) >> 8)
+
+/*
+ * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
+ */
+
+#define AUOK190X_UPDATE_MODE(_res)             ((_res & 0x7) << 12)
+#define AUOK190X_UPDATE_NONFLASH               (1 << 15)
+
+/*
+ * track panel specific parameters for common init
+ */
+
+struct auok190x_init_data {
+       char *id;
+       struct auok190x_board *board;
+
+       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+       void (*update_all)(struct auok190xfb_par *par);
+       bool (*need_refresh)(struct auok190xfb_par *par);
+       void (*init)(struct auok190xfb_par *par);
+};
+
+
+extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
+extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
+extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+                                        int argc, u16 *argv);
+extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv);
+extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
+                                               u16 cmd, int argc, u16 *argv,
+                                               int size, u16 *data);
+extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+                                       int argc, u16 *argv, int size,
+                                       u16 *data);
+extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv);
+
+extern int auok190x_common_probe(struct platform_device *pdev,
+                                struct auok190x_init_data *init);
+extern int auok190x_common_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops auok190x_pm;
index fa2b03750316b175f36f72d6f7cf8cf8a69bc375..2979292650d6494a5fddef3f3835da5931f37ec0 100644 (file)
@@ -88,7 +88,7 @@ config LCD_PLATFORM
 
 config LCD_TOSA
        tristate "Sharp SL-6000 LCD Driver"
-       depends on SPI && MACH_TOSA
+       depends on I2C && SPI && MACH_TOSA
        help
          If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
          for its LCD.
index 6c9399341bcf4aefcac58ffacae39f6b83ac5247..9327cd1b3143a93962ef5595895a43f655439ee5 100644 (file)
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
 
 EXPORT_SYMBOL_GPL(ili9320_probe_spi);
 
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
 {
        ili9320_power(ili, FB_BLANK_POWERDOWN);
 
index 1a268a294478d3de76613efd6581e542a296acc5..9bdd4b0c18c8e393994fe542babdbedc22bb6070 100644 (file)
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
 
 static int
 adv7393_write_proc(struct file *file, const char __user * buffer,
-                  unsigned long count, void *data)
+                  size_t count, void *data)
 {
        struct adv7393fb_device *fbdev = data;
-       char line[8];
        unsigned int val;
        int ret;
 
-       ret = copy_from_user(line, buffer, count);
+       ret = kstrtouint_from_user(buffer, count, 0, &val);
        if (ret)
                return -EFAULT;
 
-       val = simple_strtoul(line, NULL, 0);
        adv7393_write(fbdev->client, val >> 8, val & 0xff);
 
        return count;
@@ -414,14 +412,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
                if (ret) {
                        dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
                        ret = -EBUSY;
-                       goto out_8;
+                       goto free_fbdev;
                }
        }
 
        if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
                dev_err(&client->dev, "requesting PPI peripheral failed\n");
                ret = -EFAULT;
-               goto out_8;
+               goto free_gpio;
        }
 
        fbdev->fb_mem =
@@ -432,7 +430,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
                dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
                       (u32) fbdev->fb_len);
                ret = -ENOMEM;
-               goto out_7;
+               goto free_ppi_pins;
        }
 
        fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
        if (!fbdev->info.pseudo_palette) {
                dev_err(&client->dev, "failed to allocate pseudo_palette\n");
                ret = -ENOMEM;
-               goto out_6;
+               goto free_fb_mem;
        }
 
        if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
                dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
                           BFIN_LCD_NBR_PALETTE_ENTRIES);
                ret = -EFAULT;
-               goto out_5;
+               goto free_palette;
        }
 
        if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
                dev_err(&client->dev, "unable to request PPI DMA\n");
                ret = -EFAULT;
-               goto out_4;
+               goto free_cmap;
        }
 
        if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
                        "PPI ERROR", fbdev) < 0) {
                dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
                ret = -EFAULT;
-               goto out_3;
+               goto free_ch_ppi;
        }
 
        fbdev->open = 0;
@@ -494,14 +492,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
 
        if (ret) {
                dev_err(&client->dev, "i2c attach: init error\n");
-               goto out_1;
+               goto free_irq_ppi;
        }
 
 
        if (register_framebuffer(&fbdev->info) < 0) {
                dev_err(&client->dev, "unable to register framebuffer\n");
                ret = -EFAULT;
-               goto out_1;
+               goto free_irq_ppi;
        }
 
        dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
        if (!entry) {
                dev_err(&client->dev, "unable to create /proc entry\n");
                ret = -EFAULT;
-               goto out_0;
+               goto free_fb;
        }
 
        entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
 
        return 0;
 
- out_0:
+free_fb:
        unregister_framebuffer(&fbdev->info);
- out_1:
+free_irq_ppi:
        free_irq(IRQ_PPI_ERROR, fbdev);
- out_3:
+free_ch_ppi:
        free_dma(CH_PPI);
- out_4:
-       dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
-                         fbdev->dma_handle);
- out_5:
+free_cmap:
        fb_dealloc_cmap(&fbdev->info.cmap);
- out_6:
+free_palette:
        kfree(fbdev->info.pseudo_palette);
- out_7:
+free_fb_mem:
+       dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
+                         fbdev->dma_handle);
+free_ppi_pins:
        peripheral_free_list(ppi_pins);
- out_8:
+free_gpio:
+       if (ANOMALY_05000400)
+               gpio_free(P_IDENT(P_PPI0_FS3));
+free_fbdev:
        kfree(fbdev);
 
        return ret;
index 377dde3d5bfc8954aaccfc643b9664408e0c9da1..c95b417d0d41ae037cbcb4a01ff01c575e1b0aab 100644 (file)
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
 
 static struct platform_driver broadsheetfb_driver = {
        .probe  = broadsheetfb_probe,
-       .remove = broadsheetfb_remove,
+       .remove = __devexit_p(broadsheetfb_remove),
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "broadsheetfb",
index f56699d8122a381047bdcb345d44e1cabbb4414f..eae46f6457e2a59788c1fbd46995cab8ea6739c4 100644 (file)
@@ -1,7 +1,8 @@
 /*
- *  Cobalt server LCD frame buffer driver.
+ *  Cobalt/SEAD3 LCD frame buffer driver.
  *
  *  Copyright (C) 2008  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2012  MIPS Technologies, Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
 #define LCD_CUR_POS(x)         ((x) & LCD_CUR_POS_MASK)
 #define LCD_TEXT_POS(x)                ((x) | LCD_TEXT_MODE)
 
+#ifdef CONFIG_MIPS_COBALT
 static inline void lcd_write_control(struct fb_info *info, u8 control)
 {
        writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
 {
        return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
 }
+#else
+
+#define LCD_CTL                        0x00
+#define LCD_DATA               0x08
+#define CPLD_STATUS            0x10
+#define CPLD_DATA              0x18
+
+static inline void cpld_wait(struct fb_info *info)
+{
+       do {
+       } while (readl(info->screen_base + CPLD_STATUS) & 1);
+}
+
+static inline void lcd_write_control(struct fb_info *info, u8 control)
+{
+       cpld_wait(info);
+       writel(control, info->screen_base + LCD_CTL);
+}
+
+static inline u8 lcd_read_control(struct fb_info *info)
+{
+       cpld_wait(info);
+       readl(info->screen_base + LCD_CTL);
+       cpld_wait(info);
+       return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+
+static inline void lcd_write_data(struct fb_info *info, u8 data)
+{
+       cpld_wait(info);
+       writel(data, info->screen_base + LCD_DATA);
+}
+
+static inline u8 lcd_read_data(struct fb_info *info)
+{
+       cpld_wait(info);
+       readl(info->screen_base + LCD_DATA);
+       cpld_wait(info);
+       return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+#endif
 
 static int lcd_busy_wait(struct fb_info *info)
 {
index c2d11fef114b0ae7bc64045fea9f1e8cf58af6bf..e2c96d01d8f5dc8cc1a954e8cbfba69fdfd422c0 100644 (file)
@@ -224,5 +224,19 @@ config FONT_10x18
          big letters. It fits between the sun 12x22 and the normal 8x16 font.
          If other fonts are too big or too small for you, say Y, otherwise say N.
 
+config FONT_AUTOSELECT
+       def_bool y
+       depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+       depends on !FONT_8x8
+       depends on !FONT_6x11
+       depends on !FONT_7x14
+       depends on !FONT_PEARL_8x8
+       depends on !FONT_ACORN_8x8
+       depends on !FONT_MINI_4x6
+       depends on !FONT_SUN8x16
+       depends on !FONT_SUN12x22
+       depends on !FONT_10x18
+       select FONT_8x16
+
 endmenu
 
index f8babbeee27543e9149af7319f42c60ef4808d3b..345d96230978ed33d8d4e2a860d66273f60c35a5 100644 (file)
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
 
        err = fb_alloc_cmap(&info->cmap, 256, 0);
        if (err)
-               goto failed;
+               goto failed_cmap;
 
        err = ep93xxfb_alloc_videomem(info);
        if (err)
-               goto failed;
+               goto failed_videomem;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                err = -ENXIO;
-               goto failed;
+               goto failed_resource;
        }
 
        /*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        fbi->mmio_base = ioremap(res->start, resource_size(res));
        if (!fbi->mmio_base) {
                err = -ENXIO;
-               goto failed;
+               goto failed_resource;
        }
 
        strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        if (err == 0) {
                dev_err(info->dev, "No suitable video mode found\n");
                err = -EINVAL;
-               goto failed;
+               goto failed_mode;
        }
 
        if (mach_info->setup) {
                err = mach_info->setup(pdev);
                if (err)
-                       return err;
+                       goto failed_mode;
        }
 
        err = ep93xxfb_check_var(&info->var, info);
        if (err)
-               goto failed;
+               goto failed_check;
 
        fbi->clk = clk_get(info->dev, NULL);
        if (IS_ERR(fbi->clk)) {
                err = PTR_ERR(fbi->clk);
                fbi->clk = NULL;
-               goto failed;
+               goto failed_check;
        }
 
        ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        return 0;
 
 failed:
-       if (fbi->clk)
-               clk_put(fbi->clk);
-       if (fbi->mmio_base)
-               iounmap(fbi->mmio_base);
-       ep93xxfb_dealloc_videomem(info);
-       if (&info->cmap)
-               fb_dealloc_cmap(&info->cmap);
+       clk_put(fbi->clk);
+failed_check:
        if (fbi->mach_info->teardown)
                fbi->mach_info->teardown(pdev);
+failed_mode:
+       iounmap(fbi->mmio_base);
+failed_resource:
+       ep93xxfb_dealloc_videomem(info);
+failed_videomem:
+       fb_dealloc_cmap(&info->cmap);
+failed_cmap:
        kfree(info);
        platform_set_drvdata(pdev, NULL);
 
index 2a4481cf260cc64f4fe19cbc50d674d5539df221..a36b2d28280edfb14c90c9491322fcd00e4eb9e5 100644 (file)
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 
 static int exynos_dp_init_dp(struct exynos_dp_device *dp)
 {
        exynos_dp_reset(dp);
 
+       exynos_dp_swreset(dp);
+
        /* SW defined function Normal operation */
        exynos_dp_enable_sw_function(dp);
 
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
        int lane_count;
        u8 buf[5];
 
-       u8 *adjust_request;
+       u8 adjust_request[2];
        u8 voltage_swing;
        u8 pre_emphasis;
        u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
                /* set training pattern 2 for EQ */
                exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-               adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-                                               - DPCD_ADDR_LANE0_1_STATUS);
+               adjust_request[0] = link_status[4];
+               adjust_request[1] = link_status[5];
 
                exynos_dp_get_adjust_train(dp, adjust_request);
 
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
        u8 buf[5];
        u32 reg;
 
-       u8 *adjust_request;
+       u8 adjust_request[2];
 
        udelay(400);
 
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
        lane_count = dp->link_train.lane_count;
 
        if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
-               adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-                                               - DPCD_ADDR_LANE0_1_STATUS);
+               adjust_request[0] = link_status[4];
+               adjust_request[1] = link_status[5];
 
                if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
                        /* training pattern set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
                        return -ETIMEDOUT;
                }
 
-               mdelay(100);
+               udelay(1);
        }
 
        /* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
                        return -ETIMEDOUT;
                }
 
-               mdelay(100);
+               mdelay(1);
        }
 
        if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL);
+       dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+                               GFP_KERNEL);
        if (!dp) {
                dev_err(&pdev->dev, "no memory for device data\n");
                return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        dp->clock = clk_get(&pdev->dev, "dp");
        if (IS_ERR(dp->clock)) {
                dev_err(&pdev->dev, "failed to get clock\n");
-               ret = PTR_ERR(dp->clock);
-               goto err_dp;
+               return PTR_ERR(dp->clock);
        }
 
        clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                goto err_clock;
        }
 
-       res = request_mem_region(res->start, resource_size(res),
-                               dev_name(&pdev->dev));
-       if (!res) {
-               dev_err(&pdev->dev, "failed to request registers region\n");
-               ret = -EINVAL;
-               goto err_clock;
-       }
-
-       dp->res = res;
-
-       dp->reg_base = ioremap(res->start, resource_size(res));
+       dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
        if (!dp->reg_base) {
                dev_err(&pdev->dev, "failed to ioremap\n");
                ret = -ENOMEM;
-               goto err_req_region;
+               goto err_clock;
        }
 
        dp->irq = platform_get_irq(pdev, 0);
        if (!dp->irq) {
                dev_err(&pdev->dev, "failed to get irq\n");
                ret = -ENODEV;
-               goto err_ioremap;
+               goto err_clock;
        }
 
-       ret = request_irq(dp->irq, exynos_dp_irq_handler, 0,
-                       "exynos-dp", dp);
+       ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+                               "exynos-dp", dp);
        if (ret) {
                dev_err(&pdev->dev, "failed to request irq\n");
-               goto err_ioremap;
+               goto err_clock;
        }
 
        dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        ret = exynos_dp_detect_hpd(dp);
        if (ret) {
                dev_err(&pdev->dev, "unable to detect hpd\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                                dp->video_info->link_rate);
        if (ret) {
                dev_err(&pdev->dev, "unable to do link train\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        ret = exynos_dp_config_video(dp, dp->video_info);
        if (ret) {
                dev_err(&pdev->dev, "unable to config video\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        platform_set_drvdata(pdev, dp);
 
        return 0;
 
-err_irq:
-       free_irq(dp->irq, dp);
-err_ioremap:
-       iounmap(dp->reg_base);
-err_req_region:
-       release_mem_region(res->start, resource_size(res));
 err_clock:
        clk_put(dp->clock);
-err_dp:
-       kfree(dp);
 
        return ret;
 }
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
        if (pdata && pdata->phy_exit)
                pdata->phy_exit();
 
-       free_irq(dp->irq, dp);
-       iounmap(dp->reg_base);
-
        clk_disable(dp->clock);
        clk_put(dp->clock);
 
-       release_mem_region(dp->res->start, resource_size(dp->res));
-
-       kfree(dp);
-
        return 0;
 }
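
The exynos_dp hunks above convert probe() to managed allocations — devm_kzalloc(), devm_request_and_ioremap() and devm_request_irq() — so the explicit frees disappear from both the error labels and remove(), and err_irq/err_ioremap/err_req_region collapse into err_clock. A hedged sketch of that shape under assumed names (example_* is illustrative, not this driver):

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>

struct example_priv {
        void __iomem    *regs;
        int             irq;
};

static irqreturn_t example_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;
        struct resource *res;
        int ret;

        /* freed automatically when probe fails or the device is unbound */
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /* requests the region and maps it; both are undone automatically */
        priv->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!priv->regs)
                return -ENOMEM;

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        /* the IRQ is released for us as well, so remove() has nothing to free */
        ret = devm_request_irq(&pdev->dev, priv->irq, example_irq, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;
}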
 
index 90ceaca0fa248c4a0a1197f51654655b0d3b52ac..1e0f998e0c9f4c872d132aafa640442a0b2d4189 100644 (file)
@@ -26,7 +26,6 @@ struct link_train {
 
 struct exynos_dp_device {
        struct device           *dev;
-       struct resource         *res;
        struct clk              *clock;
        unsigned int            irq;
        void __iomem            *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
 void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
 void exynos_dp_stop_video(struct exynos_dp_device *dp);
 void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
 void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
 u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
 void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
index 6548afa0e3d21da1e543d2dd38c0b7de749b06d2..6ce76d56c3a1a2a7d3920d7eebfca34001b64dc7 100644 (file)
@@ -16,8 +16,6 @@
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 #include "exynos_dp_reg.h"
 
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
        writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
 }
 
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+       u32 reg;
+
+       reg = TX_TERMINAL_CTRL_50_OHM;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+       reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+       reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+       reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+               TX_CUR1_2X | TX_CUR_8_MA;
+       writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+       reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+               CH1_AMP_400_MV | CH0_AMP_400_MV;
+       writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
 {
        /* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
 {
        u32 reg;
 
-       writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-
        exynos_dp_stop_video(dp);
        exynos_dp_enable_video_mute(dp, 0);
 
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
 
        writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
 
+       exynos_dp_init_analog_param(dp);
        exynos_dp_init_interrupt(dp);
 }
 
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+       writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
 {
        u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
 void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
 {
        u32 reg;
+       int timeout_loop = 0;
 
        exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
 
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
        writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
 
        /* Power up PLL */
-       if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED)
+       if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
                exynos_dp_set_pll_power_down(dp, 0);
 
+               while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+                       timeout_loop++;
+                       if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+                               dev_err(dp->dev, "failed to get pll lock status\n");
+                               return;
+                       }
+                       usleep_range(10, 20);
+               }
+       }
+
        /* Enable Serdes FIFO function and Link symbol clock domain module */
        reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
        reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
index 42f608e2a43e056809f8b01595db6ea8714ade4c..125b27cd57aebd3f44acf997cf08f711928b61dd 100644 (file)
 
 #define EXYNOS_DP_LANE_MAP                     0x35C
 
+#define EXYNOS_DP_ANALOG_CTL_1                 0x370
+#define EXYNOS_DP_ANALOG_CTL_2                 0x374
+#define EXYNOS_DP_ANALOG_CTL_3                 0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1             0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL            0x380
+
 #define EXYNOS_DP_AUX_HW_RETRY_CTL             0x390
 
 #define EXYNOS_DP_COMMON_INT_STA_1             0x3C4
 #define LANE0_MAP_LOGIC_LANE_2                 (0x2 << 0)
 #define LANE0_MAP_LOGIC_LANE_3                 (0x3 << 0)
 
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM                        (0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M                                        (0x1 << 3)
+#define TX_DVDD_BIT_1_0625V                    (0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V                 (0x4 << 5)
+#define VCO_BIT_600_MICRO                      (0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC                            (0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM               (0x2 << 4)
+#define TX_CUR1_2X                             (0x1 << 2)
+#define TX_CUR_8_MA                            (0x2 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV                         (0x0 << 24)
+#define CH2_AMP_400_MV                         (0x0 << 16)
+#define CH1_AMP_400_MV                         (0x0 << 8)
+#define CH0_AMP_400_MV                         (0x0 << 0)
+
 /* EXYNOS_DP_AUX_HW_RETRY_CTL */
 #define AUX_BIT_PERIOD_EXPECTED_DELAY(x)       (((x) & 0x7) << 8)
 #define AUX_HW_RETRY_INTERVAL_MASK             (0x3 << 3)
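
The exynos_dp_init_analog_func() hunk above no longer assumes the PLL locks immediately after power-up: it polls the lock status with usleep_range() and gives up after DP_TIMEOUT_LOOP_COUNT iterations. A hedged sketch of that bounded-poll idiom; the register and lock bit below are illustrative, not the Exynos DP ones:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_TIMEOUT_LOOPS   100             /* illustrative bound */
#define EXAMPLE_PLL_LOCKED      BIT(0)          /* illustrative lock bit */

static int example_wait_pll_lock(void __iomem *status_reg)
{
        int loop = 0;

        while (!(readl(status_reg) & EXAMPLE_PLL_LOCKED)) {
                if (++loop > EXAMPLE_TIMEOUT_LOOPS)
                        return -ETIMEDOUT;      /* report instead of spinning forever */
                usleep_range(10, 20);           /* caller must be in sleepable context */
        }

        return 0;
}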
index 557091dc0e97382bcab08baa165d21ea6b5ff8f8..6c1f5c314a42b1eccd152cdd07b746883824fa24 100644 (file)
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
 }
 
 static struct regulator_bulk_data supplies[] = {
-       { .supply = "vdd10", },
+       { .supply = "vdd11", },
        { .supply = "vdd18", },
 };
 
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
        /* set display timing. */
        exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
 
+       exynos_mipi_dsi_init_interrupt(dsim);
+
        /*
         * data from Display controller(FIMD) is transferred in video mode
         * but in case of command mode, all settings are updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
                goto err_platform_get_irq;
        }
 
+       init_completion(&dsim_wr_comp);
+       init_completion(&dsim_rd_comp);
+       platform_set_drvdata(pdev, dsim);
+
        ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
-                       IRQF_SHARED, pdev->name, dsim);
+                       IRQF_SHARED, dev_name(&pdev->dev), dsim);
        if (ret != 0) {
                dev_err(&pdev->dev, "failed to request dsim irq\n");
                ret = -EINVAL;
                goto err_bind;
        }
 
-       init_completion(&dsim_wr_comp);
-       init_completion(&dsim_rd_comp);
-
-       /* enable interrupt */
+       /* enable interrupts */
        exynos_mipi_dsi_init_interrupt(dsim);
 
        /* initialize mipi-dsi client(lcd panel). */
        if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
                dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
 
-       /* in case that mipi got enabled at bootloader. */
-       if (dsim_pd->enabled)
-               goto out;
+       /* in case mipi-dsi has been enabled by bootloader */
+       if (dsim_pd->enabled) {
+               exynos_mipi_regulator_enable(dsim);
+               goto done;
+       }
 
        /* lcd panel power on. */
        if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
 
        dsim->suspended = false;
 
-out:
+done:
        platform_set_drvdata(pdev, dsim);
 
-       dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",
-               (dsim_config->e_interface == DSIM_COMMAND) ?
-                       "CPU" : "RGB");
+       dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
+               dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
 
        return 0;
 
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
-               pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mipi_dsi_suspend(struct device *dev)
 {
+       struct platform_device *pdev = to_platform_device(dev);
        struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
        struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
        struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
        return 0;
 }
 
-static int exynos_mipi_dsi_resume(struct platform_device *pdev)
+static int exynos_mipi_dsi_resume(struct device *dev)
 {
+       struct platform_device *pdev = to_platform_device(dev);
        struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
        struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
        struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
 
        return 0;
 }
-#else
-#define exynos_mipi_dsi_suspend NULL
-#define exynos_mipi_dsi_resume NULL
 #endif
 
+static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
+};
+
 static struct platform_driver exynos_mipi_dsi_driver = {
        .probe = exynos_mipi_dsi_probe,
        .remove = __devexit_p(exynos_mipi_dsi_remove),
-       .suspend = exynos_mipi_dsi_suspend,
-       .resume = exynos_mipi_dsi_resume,
        .driver = {
                   .name = "exynos-mipi-dsim",
                   .owner = THIS_MODULE,
+                  .pm = &exynos_mipi_dsi_pm_ops,
        },
 };
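
The hunks above retire the legacy platform_driver .suspend/.resume callbacks in favour of a dev_pm_ops table wired up through .driver.pm, with the callbacks now taking a struct device and compiled only under CONFIG_PM_SLEEP. A hedged sketch of that conversion, using illustrative example_* names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
        /* to_platform_device(dev) recovers the pdev, as the driver above does */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* reprogram the hardware */
        return 0;
}
#endif

/* expands to no callbacks at all when CONFIG_PM_SLEEP is disabled */
static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example-dsim",
                .owner  = THIS_MODULE,
                .pm     = &example_pm_ops,
        },
};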
 
index 14909c1d38327fe0cd9cb8d975e66e47028c0f3f..47b533a183be2979ede8a29bb9db0d89b0e309bb 100644 (file)
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
 
 irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
 {
-       unsigned int intsrc = 0;
-       unsigned int intmsk = 0;
-       struct mipi_dsim_device *dsim = NULL;
-
-       dsim = dev_id;
-       if (!dsim) {
-               dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
-                                                       __func__);
-               return IRQ_HANDLED;
+       struct mipi_dsim_device *dsim = dev_id;
+       unsigned int intsrc, intmsk;
+
+       if (dsim == NULL) {
+               dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
+               return IRQ_NONE;
        }
 
        intsrc = exynos_mipi_dsi_read_interrupt(dsim);
        intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
+       intmsk = ~intmsk & intsrc;
 
-       intmsk = ~(intmsk) & intsrc;
-
-       switch (intmsk) {
-       case INTMSK_RX_DONE:
+       if (intsrc & INTMSK_RX_DONE) {
                complete(&dsim_rd_comp);
                dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
-               break;
-       case INTMSK_FIFO_EMPTY:
+       }
+       if (intsrc & INTMSK_FIFO_EMPTY) {
                complete(&dsim_wr_comp);
                dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
-               break;
-       default:
-               break;
        }
 
        exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
                if (dsim_config->auto_vertical_cnt == 0) {
                        exynos_mipi_dsi_set_main_disp_vporch(dsim,
                                dsim_config->cmd_allow,
-                               timing->upper_margin,
-                               timing->lower_margin);
+                               timing->lower_margin,
+                               timing->upper_margin);
                        exynos_mipi_dsi_set_main_disp_hporch(dsim,
-                               timing->left_margin,
-                               timing->right_margin);
+                               timing->right_margin,
+                               timing->left_margin);
                        exynos_mipi_dsi_set_main_disp_sync_area(dsim,
                                timing->vsync_len,
                                timing->hsync_len);
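
The interrupt-handler rewrite above stops dereferencing dsim before checking it for NULL, returns IRQ_NONE rather than IRQ_HANDLED for an invocation it does not own, and tests each unmasked status bit independently instead of switching on the combined mask, so RX-done and FIFO-empty can both be serviced in one pass. A hedged sketch of a shared handler with that structure; the register offsets and bit names are illustrative:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define EX_STATUS_REG           0x00    /* illustrative offsets */
#define EX_STATUS_CLR_REG       0x04
#define EX_INT_RX_DONE          BIT(0)
#define EX_INT_FIFO_EMPTY       BIT(1)

struct example_priv {
        void __iomem            *regs;
        struct completion       rx_done;
        struct completion       tx_done;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;
        unsigned int status;

        if (!priv)
                return IRQ_NONE;        /* misregistered or spurious: not ours */

        status = readl(priv->regs + EX_STATUS_REG);
        if (!status)
                return IRQ_NONE;        /* shared line, nothing pending for us */

        if (status & EX_INT_RX_DONE)
                complete(&priv->rx_done);
        if (status & EX_INT_FIFO_EMPTY)
                complete(&priv->tx_done);

        writel(status, priv->regs + EX_STATUS_CLR_REG); /* ack what we handled */
        return IRQ_HANDLED;
}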
index 4aa9ac6218bfa2b41d51ffe78159142e0ae021cd..05d080b63bc0b85276c4ff999d93dd9ba1b76982 100644 (file)
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
                0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
                0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
        };
+       static const unsigned char data_to_send_panel_reverse[] = {
+               0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
+               0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
+               0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
+               0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
+       };
 
-       ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-               data_to_send, ARRAY_SIZE(data_to_send));
+       if (lcd->dsim_dev->panel_reverse)
+               ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+                               data_to_send_panel_reverse,
+                               ARRAY_SIZE(data_to_send_panel_reverse));
+       else
+               ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+                               data_to_send, ARRAY_SIZE(data_to_send));
 }
 
 static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
index c27e153d8882053e2d28ab72cfc784875752f337..1ddeb11659d4db9e0023f07d98e5c7c189e97535 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 
-struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
 {
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);
 
+       /* first write in this cycle, notify the driver */
+       if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+               fbdefio->first_io(info);
+
        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
index 67afa9c2289d539e281bb831aa2260c0fba1205b..a55e3669d1352f387c29e1342d4d54db429c2b3f 100644 (file)
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
  */
 void framebuffer_release(struct fb_info *info)
 {
+       if (!info)
+               return;
        kfree(info->apertures);
        kfree(info);
 }
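
The small change above makes framebuffer_release() a no-op for a NULL argument, mirroring kfree() semantics so error paths can free unconditionally. A minimal hedged sketch of the idiom (struct example is illustrative):

#include <linux/slab.h>

struct example {
        void *buf;
};

static void example_release(struct example *e)
{
        if (!e)
                return;         /* accept NULL, just like kfree() */
        kfree(e->buf);
        kfree(e);
}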
index 6af3f16754f0e2bd8e5062312164f05a80219a78..458c00664ade6110a6b26d58397086b2b1d4290f 100644 (file)
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
        diu_ops.set_pixel_clock(var->pixclock);
 
        out_be32(&hw->syn_pol, 0);      /* SYNC SIGNALS POLARITY */
-       out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
        out_be32(&hw->int_status, 0);   /* INTERRUPT STATUS */
        out_be32(&hw->plut, 0x01F5F666);
 
index 02fd2263610c1c63d1cdb8cd8173373a3ce88200..bdcbfbae277741e85b52f300f756d37bf3773a6c 100644 (file)
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
                 + dinfo->fb.size);
        if (!dinfo->aperture.virtual) {
                ERR_MSG("Cannot remap FB region.\n");
+               agp_backend_release(bridge);
                cleanup(dinfo);
                return -ENODEV;
        }
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
                                              INTEL_REG_SIZE);
        if (!dinfo->mmio_base) {
                ERR_MSG("Cannot remap MMIO region.\n");
+               agp_backend_release(bridge);
                cleanup(dinfo);
                return -ENODEV;
        }
index 31b8f67477b7957b8e193f757c4821e810b685a3..217678e0b983affe7ec7e1f1571c6a7811af5d5c 100644 (file)
@@ -1243,6 +1243,7 @@ static int maven_probe(struct i2c_client *client,
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
                                              I2C_FUNC_SMBUS_BYTE_DATA |
+                                             I2C_FUNC_NOSTART |
                                              I2C_FUNC_PROTOCOL_MANGLING))
                goto ERROR0;
        if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
index 273769bb8debce1e53704ae4c864e6d221a583a6..c87e17afb3e2c9c74d9131c578235672d325f3d8 100644 (file)
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
        return 1;
 }
 
-void mb862xx_i2c_stop(struct i2c_adapter *adap)
+static void mb862xx_i2c_stop(struct i2c_adapter *adap)
 {
        struct mb862xxfb_par *par = adap->algo_data;
 
index 11a7a333701d3abc7da2aa57573344dc0576645c..00ce1f34b4965aa1d20f9bc33c709046551f2eca 100644 (file)
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
 
 static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
 
-irqreturn_t mb862xx_intr(int irq, void *dev_id)
+static irqreturn_t mb862xx_intr(int irq, void *dev_id)
 {
        struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
        unsigned long reg_ist, mask;
index 55bf6196b7a0e1185c6237df1f3b4baae5f7cb97..85e4f44bfa61df1e5ab463b326d881bac37c544b 100644 (file)
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
 
        mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
                                             res_size(mfbi->fb_req));
-       if (!mfbi->reg_virt_addr) {
+       if (!mfbi->fb_virt_addr) {
                dev_err(&dev->dev, "failed to ioremap frame buffer\n");
                ret = -EINVAL;
                goto err4;
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
 
 static struct platform_driver mbxfb_driver = {
        .probe = mbxfb_probe,
-       .remove = mbxfb_remove,
+       .remove = __devexit_p(mbxfb_remove),
        .suspend = mbxfb_suspend,
        .resume = mbxfb_resume,
        .driver = {
index 6c6bc578d0fcd3fbf6f008465d04e75bc8aeb2b3..abbe691047bde3ac8cdc05042a27873be649ea71 100644 (file)
@@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+       struct fb_info *fb_info = platform_get_drvdata(pdev);
+       struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+       /*
+        * Force stop the LCD controller as keeping it running during reboot
+        * might interfere with the BootROM's boot mode pads sampling.
+        */
+       writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+}
+
 static struct platform_device_id mxsfb_devtype[] = {
        {
                .name = "imx23-fb",
@@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
 static struct platform_driver mxsfb_driver = {
        .probe = mxsfb_probe,
        .remove = __devexit_p(mxsfb_remove),
+       .shutdown = mxsfb_shutdown,
        .id_table = mxsfb_devtype,
        .driver = {
                   .name = DRIVER_NAME,
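
The mxsfb hunk above adds a .shutdown hook that force-stops the LCD controller, since leaving it scanning out across a reboot can disturb the BootROM's sampling of the boot-mode pads. A hedged sketch of wiring such a handler into a platform driver; the register names are illustrative, following the set/clear register style the driver uses:

#include <linux/io.h>
#include <linux/platform_device.h>

#define EXAMPLE_CTRL            0x00    /* illustrative control register */
#define EXAMPLE_CTRL_CLR        0x08    /* write-1-to-clear alias */
#define EXAMPLE_CTRL_RUN        (1 << 0)

struct example_host {
        void __iomem *base;
};

/* hooked up as  .shutdown = example_shutdown  in the platform_driver */
static void example_shutdown(struct platform_device *pdev)
{
        struct example_host *host = platform_get_drvdata(pdev);

        /* stop scan-out so the controller is quiet across the reboot */
        writel(EXAMPLE_CTRL_RUN, host->base + EXAMPLE_CTRL_CLR);
}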
index 1e7536d9a8fcfeac9e9c808aeee9629363c92efa..b48f95f0dfe24d2685d8d6ee296b18b98eab9091 100644 (file)
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
          the Mobile Industry Processor Interface DBI-C/DCS
          specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
 
-config FB_OMAP_BOOTLOADER_INIT
-       bool "Check bootloader initialization"
-       depends on FB_OMAP
-       help
-         Say Y here if you want to enable checking if the bootloader has
-         already initialized the display controller. In this case the
-         driver will skip the initialization.
-
 config FB_OMAP_CONSISTENT_DMA_SIZE
        int "Consistent DMA memory size (MB)"
        depends on FB_OMAP
index 74e7cf078505650498387f4c7e43292419fcf60c..ad741c3d1ae1668f985c53e8a1f52df367e0ec59 100644 (file)
@@ -739,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
        }
 }
 
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int acx_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -762,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
        .resume         = acx_panel_resume,
 
        .set_timings    = acx_panel_set_timings,
-       .get_timings    = acx_panel_get_timings,
        .check_timings  = acx_panel_check_timings,
 
        .get_recommended_bpp = acx_get_recommended_bpp,
index 30fe4dfeb22700a92f8347f4cad682252e68df36..e42f9dc22123e319cde3eba127b0b57ccc05e910 100644 (file)
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
 
                .name                   = "innolux_at080tn52",
        },
+
+       /* Mitsubishi AA084SB01 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 600,
+                       .pixel_clock    = 40000,
+
+                       .hsw            = 1,
+                       .hfp            = 254,
+                       .hbp            = 1,
+
+                       .vsw            = 1,
+                       .vfp            = 26,
+                       .vbp            = 1,
+               },
+               .config                 = OMAP_DSS_LCD_TFT,
+               .name                   = "mitsubishi_aa084sb01",
+       },
+       /* EDT ET0500G0DH6 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 480,
+                       .pixel_clock    = 33260,
+
+                       .hsw            = 128,
+                       .hfp            = 216,
+                       .hbp            = 40,
+
+                       .vsw            = 2,
+                       .vfp            = 35,
+                       .vbp            = 10,
+               },
+               .config                 = OMAP_DSS_LCD_TFT,
+               .name                   = "edt_et0500g0dh6",
+       },
+
+       /* Prime-View PD050VL1 */
+       {
+               {
+                       .x_res          = 640,
+                       .y_res          = 480,
+
+                       .pixel_clock    = 25000,
+
+                       .hsw            = 96,
+                       .hfp            = 18,
+                       .hbp            = 46,
+
+                       .vsw            = 2,
+                       .vfp            = 10,
+                       .vbp            = 33,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pd050vl1",
+       },
+
+       /* Prime-View PM070WL4 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 480,
+
+                       .pixel_clock    = 32000,
+
+                       .hsw            = 128,
+                       .hfp            = 42,
+                       .hbp            = 86,
+
+                       .vsw            = 2,
+                       .vfp            = 10,
+                       .vbp            = 33,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pm070wl4",
+       },
+
+       /* Prime-View PD104SLF */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 600,
+
+                       .pixel_clock    = 40000,
+
+                       .hsw            = 128,
+                       .hfp            = 42,
+                       .hbp            = 86,
+
+                       .vsw            = 4,
+                       .vfp            = 1,
+                       .vbp            = 23,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pd104slf",
+       },
 };
 
 struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
        dpi_set_timings(dssdev, timings);
 }
 
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
        .resume         = generic_dpi_panel_resume,
 
        .set_timings    = generic_dpi_panel_set_timings,
-       .get_timings    = generic_dpi_panel_get_timings,
        .check_timings  = generic_dpi_panel_check_timings,
 
        .driver         = {
index dc9408dc93d1ed36f702187aebab9e5d7605b84b..4a34cdc1371b34c777c8b4747bd669186cd2d530 100644 (file)
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
        .get_resolution = n8x0_panel_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = n8x0_panel_get_timings,
-
        .driver         = {
                .name   = "n8x0_panel",
                .owner  = THIS_MODULE,
index b2dd88b484209b46f5b213e9007cb2108b438c34..901576eb5a8425995e57f680715c649e68dc0a54 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/gpio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
-#include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
 
 #include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 static int taal_panel_reset(struct omap_dss_device *dssdev);
 
-struct panel_regulator {
-       struct regulator *regulator;
-       const char *name;
-       int min_uV;
-       int max_uV;
-};
-
-static void free_regulators(struct panel_regulator *regulators, int n)
-{
-       int i;
-
-       for (i = 0; i < n; i++) {
-               /* disable/put in reverse order */
-               regulator_disable(regulators[n - i - 1].regulator);
-               regulator_put(regulators[n - i - 1].regulator);
-       }
-}
-
-static int init_regulators(struct omap_dss_device *dssdev,
-                       struct panel_regulator *regulators, int n)
-{
-       int r, i, v;
-
-       for (i = 0; i < n; i++) {
-               struct regulator *reg;
-
-               reg = regulator_get(&dssdev->dev, regulators[i].name);
-               if (IS_ERR(reg)) {
-                       dev_err(&dssdev->dev, "failed to get regulator %s\n",
-                               regulators[i].name);
-                       r = PTR_ERR(reg);
-                       goto err;
-               }
-
-               /* FIXME: better handling of fixed vs. variable regulators */
-               v = regulator_get_voltage(reg);
-               if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
-                       r = regulator_set_voltage(reg, regulators[i].min_uV,
-                                               regulators[i].max_uV);
-                       if (r) {
-                               dev_err(&dssdev->dev,
-                                       "failed to set regulator %s voltage\n",
-                                       regulators[i].name);
-                               regulator_put(reg);
-                               goto err;
-                       }
-               }
-
-               r = regulator_enable(reg);
-               if (r) {
-                       dev_err(&dssdev->dev, "failed to enable regulator %s\n",
-                               regulators[i].name);
-                       regulator_put(reg);
-                       goto err;
-               }
-
-               regulators[i].regulator = reg;
-       }
-
-       return 0;
-
-err:
-       free_regulators(regulators, i);
-
-       return r;
-}
-
 /**
  * struct panel_config - panel configuration
  * @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
                unsigned int low;
        } reset_sequence;
 
-       struct panel_regulator *regulators;
-       int num_regulators;
 };
 
 enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
        .update_status  = taal_bl_update_status,
 };
 
-static void taal_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void taal_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -602,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
 {
        struct omap_dss_device *dssdev = to_dss_device(dev);
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       u8 errors;
+       u8 errors = 0;
        int r;
 
        mutex_lock(&td->lock);
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
 
        atomic_set(&td->do_update, 0);
 
-       r = init_regulators(dssdev, panel_config->regulators,
-                       panel_config->num_regulators);
-       if (r)
-               goto err_reg;
-
        td->workqueue = create_singlethread_workqueue("taal_esd");
        if (td->workqueue == NULL) {
                dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
 err_rst_gpio:
        destroy_workqueue(td->workqueue);
 err_wq:
-       free_regulators(panel_config->regulators, panel_config->num_regulators);
-err_reg:
        kfree(td);
 err:
        return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
        /* reset, to be sure that the panel is in a valid state */
        taal_hw_reset(dssdev);
 
-       free_regulators(td->panel_config->regulators,
-                       td->panel_config->num_regulators);
-
        if (gpio_is_valid(panel_data->reset_gpio))
                gpio_free(panel_data->reset_gpio);
 
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
        .run_test       = taal_run_test,
        .memory_read    = taal_memory_read,
 
-       .get_timings    = taal_get_timings,
-
        .driver         = {
                .name   = "taal",
                .owner  = THIS_MODULE,
index 52637fa8fda83cfccaf8d70ddbf7c4ca26a7baf1..bff306e041cabef157929f4078401122c06510d9 100644 (file)
@@ -47,13 +47,9 @@ struct panel_drv_data {
        struct mutex lock;
 
        int pd_gpio;
-};
 
-static inline struct tfp410_platform_data
-*get_pdata(const struct omap_dss_device *dssdev)
-{
-       return dssdev->data;
-}
+       struct i2c_adapter *i2c_adapter;
+};
 
 static int tfp410_power_on(struct omap_dss_device *dssdev)
 {
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
                goto err0;
 
        if (gpio_is_valid(ddata->pd_gpio))
-               gpio_set_value(ddata->pd_gpio, 1);
+               gpio_set_value_cansleep(ddata->pd_gpio, 1);
 
        return 0;
 err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
                return;
 
        if (gpio_is_valid(ddata->pd_gpio))
-               gpio_set_value(ddata->pd_gpio, 0);
+               gpio_set_value_cansleep(ddata->pd_gpio, 0);
 
        omapdss_dpi_display_disable(dssdev);
 }
 
 static int tfp410_probe(struct omap_dss_device *dssdev)
 {
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
        struct panel_drv_data *ddata;
        int r;
+       int i2c_bus_num;
 
-       ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+       ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
 
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
        ddata->dssdev = dssdev;
        mutex_init(&ddata->lock);
 
-       if (pdata)
+       if (dssdev->data) {
+               struct tfp410_platform_data *pdata = dssdev->data;
+
                ddata->pd_gpio = pdata->power_down_gpio;
-       else
+               i2c_bus_num = pdata->i2c_bus_num;
+       } else {
                ddata->pd_gpio = -1;
+               i2c_bus_num = -1;
+       }
 
        if (gpio_is_valid(ddata->pd_gpio)) {
                r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
                if (r) {
                        dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
                                        ddata->pd_gpio);
-                       ddata->pd_gpio = -1;
+                       return r;
                }
        }
 
+       if (i2c_bus_num != -1) {
+               struct i2c_adapter *adapter;
+
+               adapter = i2c_get_adapter(i2c_bus_num);
+               if (!adapter) {
+                       dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+                                       i2c_bus_num);
+                       r = -EINVAL;
+                       goto err_i2c;
+               }
+
+               ddata->i2c_adapter = adapter;
+       }
+
        dev_set_drvdata(&dssdev->dev, ddata);
 
        return 0;
+err_i2c:
+       if (gpio_is_valid(ddata->pd_gpio))
+               gpio_free(ddata->pd_gpio);
+       return r;
 }
 
 static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
 
        mutex_lock(&ddata->lock);
 
+       if (ddata->i2c_adapter)
+               i2c_put_adapter(ddata->i2c_adapter);
+
        if (gpio_is_valid(ddata->pd_gpio))
                gpio_free(ddata->pd_gpio);
 
        dev_set_drvdata(&dssdev->dev, NULL);
 
        mutex_unlock(&ddata->lock);
-
-       kfree(ddata);
 }
 
 static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
                u8 *edid, int len)
 {
        struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
-       struct i2c_adapter *adapter;
        int r, l, bytes_read;
 
        mutex_lock(&ddata->lock);
 
-       if (pdata->i2c_bus_num == 0) {
+       if (!ddata->i2c_adapter) {
                r = -ENODEV;
                goto err;
        }
 
-       adapter = i2c_get_adapter(pdata->i2c_bus_num);
-       if (!adapter) {
-               dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
-                               pdata->i2c_bus_num);
-               r = -EINVAL;
-               goto err;
-       }
-
        l = min(EDID_LENGTH, len);
-       r = tfp410_ddc_read(adapter, edid, l, 0);
+       r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
        if (r)
                goto err;
 
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
        if (len > EDID_LENGTH && edid[0x7e] > 0) {
                l = min(EDID_LENGTH, len - EDID_LENGTH);
 
-               r = tfp410_ddc_read(adapter, edid + EDID_LENGTH,
+               r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
                                l, EDID_LENGTH);
                if (r)
                        goto err;
@@ -319,21 +329,15 @@ err:
 static bool tfp410_detect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
-       struct i2c_adapter *adapter;
        unsigned char out;
        int r;
 
        mutex_lock(&ddata->lock);
 
-       if (pdata->i2c_bus_num == 0)
-               goto out;
-
-       adapter = i2c_get_adapter(pdata->i2c_bus_num);
-       if (!adapter)
+       if (!ddata->i2c_adapter)
                goto out;
 
-       r = tfp410_ddc_read(adapter, &out, 1, 0);
+       r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
 
        mutex_unlock(&ddata->lock);
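
The tfp410 hunks above move i2c_get_adapter() out of every EDID read and detect call into probe(), cache the adapter in the driver data, and drop the reference once with i2c_put_adapter() in remove(). A hedged sketch of that get-once/put-once pattern; the struct and bus-number handling are illustrative:

#include <linux/i2c.h>

struct example_ddata {
        struct i2c_adapter *i2c_adapter;
};

static int example_attach_ddc(struct example_ddata *ddata, int bus_num)
{
        if (bus_num < 0)
                return 0;       /* no DDC bus wired up; EDID reads will report -ENODEV */

        ddata->i2c_adapter = i2c_get_adapter(bus_num);  /* takes a reference */
        if (!ddata->i2c_adapter)
                return -EINVAL;

        return 0;
}

static void example_detach_ddc(struct example_ddata *ddata)
{
        if (ddata->i2c_adapter)
                i2c_put_adapter(ddata->i2c_adapter);    /* drop the reference */
}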
 
index 32f3fcd7f0f0adc8361523035ce5435412578424..4b6448b3c31f224f0919cd49d345eee9f61f52ae 100644 (file)
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
 static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
        int nreset_gpio = tpo_td043->nreset_gpio;
+       int r;
 
        if (tpo_td043->powered_on)
                return 0;
 
-       regulator_enable(tpo_td043->vcc_reg);
+       r = regulator_enable(tpo_td043->vcc_reg);
+       if (r != 0)
+               return r;
 
-       /* wait for regulator to stabilize */
+       /* wait for panel to stabilize */
        msleep(160);
 
        if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
                gpio_free(nreset_gpio);
 }
 
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       return dpi_check_timings(dssdev, timings);
+}
+
 static struct omap_dss_driver tpo_td043_driver = {
        .probe          = tpo_td043_probe,
        .remove         = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
        .set_mirror     = tpo_td043_set_hmirror,
        .get_mirror     = tpo_td043_get_hmirror,
 
+       .set_timings    = tpo_td043_set_timings,
+       .check_timings  = tpo_td043_check_timings,
+
        .driver         = {
                .name   = "tpo_td043mtea1_panel",
                .owner  = THIS_MODULE,
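
The power-on hunk above stops ignoring the return value of regulator_enable() and propagates the failure instead of blindly waiting for the panel. A hedged sketch (example_power_on is an illustrative name):

#include <linux/delay.h>
#include <linux/regulator/consumer.h>

static int example_power_on(struct regulator *vcc)
{
        int r;

        r = regulator_enable(vcc);      /* can fail; the error must be propagated */
        if (r)
                return r;

        msleep(160);    /* the driver above then waits for the panel to stabilize */
        return 0;
}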
index 7be7c06a249ecd9cde3487a4862e1e1f9a2a2673..43324e5ed25fc2bad6712eff76397b92da97bbba 100644 (file)
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
          HDMI Interface. This adds the High Definition Multimedia Interface.
          See http://www.hdmi.org/ for HDMI specification.
 
+config OMAP4_DSS_HDMI_AUDIO
+       bool
+       depends on OMAP4_DSS_HDMI
+
 config OMAP2_DSS_SDI
        bool "SDI support"
        depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
 
           See http://www.mipi.org/ for DSI specifications.
 
-config OMAP2_DSS_FAKE_VSYNC
-       bool "Fake VSYNC irq from manual update displays"
-       default n
-       help
-         If this is selected, DSI will generate a fake DISPC VSYNC interrupt
-         when DSI has sent a frame. This is only needed with DSI or RFBI
-         displays using manual mode, and you want VSYNC to, for example,
-         time animation.
-
 config OMAP2_DSS_MIN_FCK_PER_PCK
        int "Minimum FCK/PCK ratio (for scaling)"
        range 0 32
index b10b3bc1931e6ce0217172475155c3d07a4c7fe0..ab22cc224f3eb8259a7d42dd2c841687703ffab7 100644 (file)
@@ -99,6 +99,11 @@ struct mgr_priv_data {
 
        /* If true, a display is enabled using this manager */
        bool enabled;
+
+       bool extra_info_dirty;
+       bool shadow_extra_info_dirty;
+
+       struct omap_video_timings timings;
 };
 
 static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
 }
 
 static int dss_check_settings_low(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev, bool applying)
+               bool applying)
 {
        struct omap_overlay_info *oi;
        struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
 
        mp = get_mgr_priv(mgr);
 
+       if (!mp->enabled)
+               return 0;
+
        if (applying && mp->user_info_dirty)
                mi = &mp->user_info;
        else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
                ois[ovl->id] = oi;
        }
 
-       return dss_mgr_check(mgr, dssdev, mi, ois);
+       return dss_mgr_check(mgr, mi, &mp->timings, ois);
 }
 
 /*
  * check manager and overlay settings using overlay_info from data->info
  */
-static int dss_check_settings(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev)
+static int dss_check_settings(struct omap_overlay_manager *mgr)
 {
-       return dss_check_settings_low(mgr, dssdev, false);
+       return dss_check_settings_low(mgr, false);
 }
 
 /*
  * check manager and overlay settings using overlay_info from ovl->info if
  * dirty and from data->info otherwise
  */
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev)
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
 {
-       return dss_check_settings_low(mgr, dssdev, true);
+       return dss_check_settings_low(mgr, true);
 }
 
 static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
                        if (mp->shadow_info_dirty)
                                return true;
 
+                       /*
+                        * NOTE: we don't check extra_info flags for disabled
+                        * managers, once the manager is enabled, the extra_info
+                        * related manager changes will be taken in by HW.
+                        */
+
+                       /* to write new values to registers */
+                       if (mp->extra_info_dirty)
+                               return true;
+
+                       /* to set GO bit */
+                       if (mp->shadow_extra_info_dirty)
+                               return true;
+
                        list_for_each_entry(ovl, &mgr->overlays, list) {
                                struct ovl_priv_data *op;
 
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
 
        mp = get_mgr_priv(mgr);
 
-       if (mp->shadow_info_dirty)
+       if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
                return true;
 
        list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
 /* returns true if an extra_info field is currently being updated */
 static bool extra_info_update_ongoing(void)
 {
-       const int num_ovls = omap_dss_get_num_overlays();
-       struct ovl_priv_data *op;
-       struct omap_overlay *ovl;
-       struct mgr_priv_data *mp;
+       const int num_mgrs = dss_feat_get_num_mgrs();
        int i;
 
-       for (i = 0; i < num_ovls; ++i) {
-               ovl = omap_dss_get_overlay(i);
-               op = get_ovl_priv(ovl);
-
-               if (!ovl->manager)
-                       continue;
+       for (i = 0; i < num_mgrs; ++i) {
+               struct omap_overlay_manager *mgr;
+               struct omap_overlay *ovl;
+               struct mgr_priv_data *mp;
 
-               mp = get_mgr_priv(ovl->manager);
+               mgr = omap_dss_get_overlay_manager(i);
+               mp = get_mgr_priv(mgr);
 
                if (!mp->enabled)
                        continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
                if (!mp->updating)
                        continue;
 
-               if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+               if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
                        return true;
+
+               list_for_each_entry(ovl, &mgr->overlays, list) {
+                       struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+                       if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+                               return true;
+               }
        }
 
        return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
 
        oi = &op->info;
 
+       mp = get_mgr_priv(ovl->manager);
+
        replication = dss_use_replication(ovl->manager->device, oi->color_mode);
 
        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
 
-       r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+       r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
        if (r) {
                /*
                 * We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
                return;
        }
 
-       mp = get_mgr_priv(ovl->manager);
-
        op->info_dirty = false;
        if (mp->updating)
                op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
        }
 }
 
+static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
+{
+       struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+       DSSDBGF("%d", mgr->id);
+
+       if (!mp->extra_info_dirty)
+               return;
+
+       dispc_mgr_set_timings(mgr->id, &mp->timings);
+
+       mp->extra_info_dirty = false;
+       if (mp->updating)
+               mp->shadow_extra_info_dirty = true;
+}
+
 static void dss_write_regs_common(void)
 {
        const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
                if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
                        continue;
 
-               r = dss_check_settings(mgr, mgr->device);
+               r = dss_check_settings(mgr);
                if (r) {
                        DSSERR("cannot write registers for manager %s: "
                                        "illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
                }
 
                dss_mgr_write_regs(mgr);
+               dss_mgr_write_regs_extra(mgr);
        }
 }
 
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
 
        mp = get_mgr_priv(mgr);
        mp->shadow_info_dirty = false;
+       mp->shadow_extra_info_dirty = false;
 
        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
 
        WARN_ON(mp->updating);
 
-       r = dss_check_settings(mgr, mgr->device);
+       r = dss_check_settings(mgr);
        if (r) {
                DSSERR("cannot start manual update: illegal configuration\n");
                spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
        }
 
        dss_mgr_write_regs(mgr);
+       dss_mgr_write_regs_extra(mgr);
 
        dss_write_regs_common();
 
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
 
        spin_lock_irqsave(&data_lock, flags);
 
-       r = dss_check_settings_apply(mgr, mgr->device);
+       r = dss_check_settings_apply(mgr);
        if (r) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
                bool use_fifo_merge)
 {
        struct ovl_priv_data *op = get_ovl_priv(ovl);
-       struct omap_dss_device *dssdev;
        u32 fifo_low, fifo_high;
 
        if (!op->enabled && !op->enabling)
                return;
 
-       dssdev = ovl->manager->device;
-
        dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
-                       use_fifo_merge);
+                       use_fifo_merge, ovl_manual_update(ovl));
 
        dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
 }
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
 
        mp->enabled = true;
 
-       r = dss_check_settings(mgr, mgr->device);
+       r = dss_check_settings(mgr);
        if (r) {
                DSSERR("failed to enable manager %d: check_settings failed\n",
                                mgr->id);
@@ -1225,6 +1264,35 @@ err:
        return r;
 }
 
+static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+       mp->timings = *timings;
+       mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       unsigned long flags;
+
+       mutex_lock(&apply_lock);
+
+       spin_lock_irqsave(&data_lock, flags);
+
+       dss_apply_mgr_timings(mgr, timings);
+
+       dss_write_regs();
+       dss_set_go_bits();
+
+       spin_unlock_irqrestore(&data_lock, flags);
+
+       wait_pending_extra_info_updates();
+
+       mutex_unlock(&apply_lock);
+}
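
The new dss_mgr_set_timings() entry point takes apply_lock and data_lock, writes the registers, raises the GO bits and then waits for the pending extra_info update, so it may sleep. A minimal sketch of how an interface driver could hand new panel timings to its manager through it; the example_set_timings() wrapper and the assumption that the device is already connected to a manager are illustrative only, not part of this patch:

/* Illustrative only: a hypothetical caller of the new entry point. */
static void example_set_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
{
        /*
         * dss_mgr_set_timings() sleeps (mutex + wait for the shadow
         * registers), so it must be called from process context, never
         * from the DISPC interrupt path.
         */
        dss_mgr_set_timings(dssdev->manager, timings);
}
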
 
 int dss_ovl_set_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
 
        op->enabling = true;
 
-       r = dss_check_settings(ovl->manager, ovl->manager->device);
+       r = dss_check_settings(ovl->manager);
        if (r) {
                DSSERR("failed to enable overlay %d: check_settings failed\n",
                                ovl->id);
index e8a120771ac6fc0cdb4fc537aca478ecde0d6ab2..58bd9c27369df9d4a39d1352f9d9cad74ec51b37 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
 
 #include <video/omapdss.h>
 
@@ -43,6 +44,8 @@ static struct {
 
        struct regulator *vdds_dsi_reg;
        struct regulator *vdds_sdi_reg;
+
+       const char *default_display_name;
 } core;
 
 static char *def_disp_name;
@@ -54,9 +57,6 @@ bool dss_debug;
 module_param_named(debug, dss_debug, bool, 0644);
 #endif
 
-static int omap_dss_register_device(struct omap_dss_device *);
-static void omap_dss_unregister_device(struct omap_dss_device *);
-
 /* REGULATORS */
 
 struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +87,51 @@ struct regulator *dss_get_vdds_sdi(void)
        return reg;
 }
 
+int dss_get_ctx_loss_count(struct device *dev)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+       int cnt;
+
+       if (!board_data->get_context_loss_count)
+               return -ENOENT;
+
+       cnt = board_data->get_context_loss_count(dev);
+
+       WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+       return cnt;
+}
+
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+       if (!board_data->dsi_enable_pads)
+               return -ENOENT;
+
+       return board_data->dsi_enable_pads(dsi_id, lane_mask);
+}
+
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+       if (!board_data->dsi_disable_pads)
+               return;
+
+       board_data->dsi_disable_pads(dsi_id, lane_mask);
+}
+
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+       struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+
+       if (pdata->set_min_bus_tput)
+               return pdata->set_min_bus_tput(dev, tput);
+       else
+               return 0;
+}
+
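
These wrappers centralise access to the optional board hooks: a missing get_context_loss_count or dsi_*_pads callback makes them return -ENOENT (or simply return), and a missing set_min_bus_tput is treated as success. A rough sketch of the platform data a board file would have to provide for them to do real work; the example_* callback names are placeholders, only the field names come from the code above:

/* Hypothetical board file fragment; example_* callbacks are invented. */
static struct omap_dss_board_info example_dss_data = {
        .get_context_loss_count = example_get_ctx_loss_count,
        .dsi_enable_pads        = example_dsi_enable_pads,
        .dsi_disable_pads       = example_dsi_disable_pads,
        .set_min_bus_tput       = example_set_min_bus_tput,
        /* .default_device, .devices, ... as before */
};
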
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 static int dss_debug_show(struct seq_file *s, void *unused)
 {
@@ -121,34 +166,6 @@ static int dss_initialize_debugfs(void)
        debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
                        &dss_debug_dump_clocks, &dss_debug_fops);
 
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-       debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
-                       &dispc_dump_irqs, &dss_debug_fops);
-#endif
-
-#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
-       dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
-#endif
-
-       debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
-                       &dss_dump_regs, &dss_debug_fops);
-       debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
-                       &dispc_dump_regs, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_RFBI
-       debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
-                       &rfbi_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-       dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-       debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
-                       &venc_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
-       debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
-                       &hdmi_dump_regs, &dss_debug_fops);
-#endif
        return 0;
 }
 
@@ -157,6 +174,19 @@ static void dss_uninitialize_debugfs(void)
        if (dss_debugfs_dir)
                debugfs_remove_recursive(dss_debugfs_dir);
 }
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+       struct dentry *d;
+
+       d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+                       write, &dss_debug_fops);
+
+       if (IS_ERR(d))
+               return PTR_ERR(d);
+
+       return 0;
+}
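
Register dumps that used to be wired up centrally in dss_initialize_debugfs() are now registered by the individual submodules through this helper. A minimal sketch of such a registration, assuming a submodule with a dump callback of the expected void (*)(struct seq_file *) type; the example_* names are made up:

/* Illustrative only: how a submodule might expose its registers now. */
static void example_dump_regs(struct seq_file *s)
{
        seq_printf(s, "EXAMPLE_REG = 0x%08x\n", 0xdeadbeef); /* dummy value */
}

static int example_init_debugfs(void)
{
        return dss_debugfs_create_file("example", example_dump_regs);
}
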
 #else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 static inline int dss_initialize_debugfs(void)
 {
@@ -165,14 +195,39 @@ static inline int dss_initialize_debugfs(void)
 static inline void dss_uninitialize_debugfs(void)
 {
 }
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+       return 0;
+}
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+       DSSDBG("pm notif %lu\n", v);
+
+       switch (v) {
+       case PM_SUSPEND_PREPARE:
+               DSSDBG("suspending displays\n");
+               return dss_suspend_all_devices();
+
+       case PM_POST_SUSPEND:
+               DSSDBG("resuming displays\n");
+               return dss_resume_all_devices();
+
+       default:
+               return 0;
+       }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+       .notifier_call = omap_dss_pm_notif,
+};
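
PM_SUSPEND_PREPARE is delivered before any device suspend callbacks run, and PM_POST_SUSPEND only after all devices have been resumed, so with this notifier the displays are shut down before, and brought back after, the rest of the suspend path; the platform_driver .suspend/.resume hooks that used to do this are removed further down.
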
+
+static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
-       int i;
 
        core.pdev = pdev;
 
@@ -187,28 +242,15 @@ static int omap_dss_probe(struct platform_device *pdev)
        if (r)
                goto err_debugfs;
 
-       for (i = 0; i < pdata->num_devices; ++i) {
-               struct omap_dss_device *dssdev = pdata->devices[i];
-
-               r = omap_dss_register_device(dssdev);
-               if (r) {
-                       DSSERR("device %d %s register failed %d\n", i,
-                               dssdev->name ?: "unnamed", r);
+       if (def_disp_name)
+               core.default_display_name = def_disp_name;
+       else if (pdata->default_device)
+               core.default_display_name = pdata->default_device->name;
 
-                       while (--i >= 0)
-                               omap_dss_unregister_device(pdata->devices[i]);
-
-                       goto err_register;
-               }
-
-               if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
-                       pdata->default_device = dssdev;
-       }
+       register_pm_notifier(&omap_dss_pm_notif_block);
 
        return 0;
 
-err_register:
-       dss_uninitialize_debugfs();
 err_debugfs:
 
        return r;
@@ -216,17 +258,13 @@ err_debugfs:
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
-       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
-       int i;
+       unregister_pm_notifier(&omap_dss_pm_notif_block);
 
        dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
        dss_uninit_overlay_managers(pdev);
 
-       for (i = 0; i < pdata->num_devices; ++i)
-               omap_dss_unregister_device(pdata->devices[i]);
-
        return 0;
 }
 
@@ -236,26 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
        dss_disable_all_devices();
 }
 
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       DSSDBG("suspend %d\n", state.event);
-
-       return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
-       DSSDBG("resume\n");
-
-       return dss_resume_all_devices();
-}
-
 static struct platform_driver omap_dss_driver = {
-       .probe          = omap_dss_probe,
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
-       .suspend        = omap_dss_suspend,
-       .resume         = omap_dss_resume,
        .driver         = {
                .name   = "omapdss",
                .owner  = THIS_MODULE,
@@ -326,7 +347,6 @@ static int dss_driver_probe(struct device *dev)
        int r;
        struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
        struct omap_dss_device *dssdev = to_dss_device(dev);
-       struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
        bool force;
 
        DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +355,8 @@ static int dss_driver_probe(struct device *dev)
 
        dss_init_device(core.pdev, dssdev);
 
-       force = pdata->default_device == dssdev;
+       force = core.default_display_name &&
+               strcmp(core.default_display_name, dssdev->name) == 0;
        dss_recheck_connections(dssdev, force);
 
        r = dssdrv->probe(dssdev);
@@ -381,6 +402,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
        if (dssdriver->get_recommended_bpp == NULL)
                dssdriver->get_recommended_bpp =
                        omapdss_default_get_recommended_bpp;
+       if (dssdriver->get_timings == NULL)
+               dssdriver->get_timings = omapdss_default_get_timings;
 
        return driver_register(&dssdriver->driver);
 }
@@ -427,27 +450,38 @@ static void omap_dss_dev_release(struct device *dev)
        reset_device(dev, 0);
 }
 
-static int omap_dss_register_device(struct omap_dss_device *dssdev)
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+               struct device *parent, int disp_num)
 {
-       static int dev_num;
-
        WARN_ON(!dssdev->driver_name);
 
        reset_device(&dssdev->dev, 1);
        dssdev->dev.bus = &dss_bus_type;
-       dssdev->dev.parent = &dss_bus;
+       dssdev->dev.parent = parent;
        dssdev->dev.release = omap_dss_dev_release;
-       dev_set_name(&dssdev->dev, "display%d", dev_num++);
+       dev_set_name(&dssdev->dev, "display%d", disp_num);
        return device_register(&dssdev->dev);
 }
 
-static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
 {
        device_unregister(&dssdev->dev);
 }
 
+static int dss_unregister_dss_dev(struct device *dev, void *data)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       omap_dss_unregister_device(dssdev);
+       return 0;
+}
+
+void omap_dss_unregister_child_devices(struct device *parent)
+{
+       device_for_each_child(parent, NULL, dss_unregister_dss_dev);
+}
+
 /* BUS */
-static int omap_dss_bus_register(void)
+static int __init omap_dss_bus_register(void)
 {
        int r;
 
@@ -469,12 +503,56 @@ static int omap_dss_bus_register(void)
 }
 
 /* INIT */
+static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+       dpi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+       sdi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+       rfbi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+       venc_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+       dsi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+       hdmi_init_platform_driver,
+#endif
+};
+
+static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+       dpi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+       sdi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+       rfbi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+       venc_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+       dsi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+       hdmi_uninit_platform_driver,
+#endif
+};
+
+static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
 
 static int __init omap_dss_register_drivers(void)
 {
        int r;
+       int i;
 
-       r = platform_driver_register(&omap_dss_driver);
+       r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
        if (r)
                return r;
 
@@ -490,40 +568,18 @@ static int __init omap_dss_register_drivers(void)
                goto err_dispc;
        }
 
-       r = rfbi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize rfbi platform driver\n");
-               goto err_rfbi;
-       }
-
-       r = venc_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize venc platform driver\n");
-               goto err_venc;
-       }
-
-       r = dsi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize DSI platform driver\n");
-               goto err_dsi;
-       }
-
-       r = hdmi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize hdmi\n");
-               goto err_hdmi;
+       /*
+        * It's ok if an output-driver registration fails. It happens, for example,
+        * when there is no output-device (e.g. SDI for OMAP4).
+        */
+       for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
+               r = dss_output_drv_reg_funcs[i]();
+               if (r == 0)
+                       dss_output_drv_loaded[i] = true;
        }
 
        return 0;
 
-err_hdmi:
-       dsi_uninit_platform_driver();
-err_dsi:
-       venc_uninit_platform_driver();
-err_venc:
-       rfbi_uninit_platform_driver();
-err_rfbi:
-       dispc_uninit_platform_driver();
 err_dispc:
        dss_uninit_platform_driver();
 err_dss:
@@ -534,10 +590,13 @@ err_dss:
 
 static void __exit omap_dss_unregister_drivers(void)
 {
-       hdmi_uninit_platform_driver();
-       dsi_uninit_platform_driver();
-       venc_uninit_platform_driver();
-       rfbi_uninit_platform_driver();
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
+               if (dss_output_drv_loaded[i])
+                       dss_output_drv_unreg_funcs[i]();
+       }
+
        dispc_uninit_platform_driver();
        dss_uninit_platform_driver();
 
index ee30937482e1156240de18ccaff3f889a1e4637b..397d4eee11bb7715d9d01e3b8a1330abcdc2f2e4 100644 (file)
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
        return __raw_readl(dispc.base + idx);
 }
 
-static int dispc_get_ctx_loss_count(void)
-{
-       struct device *dev = &dispc.pdev->dev;
-       struct omap_display_platform_data *pdata = dev->platform_data;
-       struct omap_dss_board_info *board_data = pdata->board_data;
-       int cnt;
-
-       if (!board_data->get_context_loss_count)
-               return -ENOENT;
-
-       cnt = board_data->get_context_loss_count(dev);
-
-       WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
-       return cnt;
-}
-
 #define SR(reg) \
        dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
 #define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
        if (dss_has_feature(FEAT_CORE_CLK_DIV))
                SR(DIVISOR);
 
-       dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+       dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
        dispc.ctx_valid = true;
 
        DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
        if (!dispc.ctx_valid)
                return;
 
-       ctx = dispc_get_ctx_loss_count();
+       ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
 
        if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
                return;
@@ -401,7 +384,7 @@ void dispc_runtime_put(void)
        DSSDBG("dispc_runtime_put\n");
 
        r = pm_runtime_put_sync(&dispc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
                return false;
 }
 
-static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
-{
-       struct omap_overlay_manager *mgr =
-               omap_dss_get_overlay_manager(channel);
-
-       return mgr ? mgr->device : NULL;
-}
-
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
 {
        switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
                return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
                return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
                case OMAP_DSS_COLOR_XRGB16_1555:
                        m = 0xf; break;
                default:
-                       BUG(); break;
+                       BUG(); return;
                }
        } else {
                switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
                case OMAP_DSS_COLOR_XRGB16_1555:
                        m = 0xf; break;
                default:
-                       BUG(); break;
+                       BUG(); return;
                }
        }
 
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
 }
 
+static void dispc_ovl_configure_burst_type(enum omap_plane plane,
+               enum omap_dss_rotation_type rotation_type)
+{
+       if (dss_has_feature(FEAT_BURST_2D) == 0)
+               return;
+
+       if (rotation_type == OMAP_DSS_ROT_TILER)
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+       else
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+}
+
 void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
 {
        int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
                        break;
                default:
                        BUG();
+                       return;
                }
 
                val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
                break;
        default:
                BUG();
+               return 0;
        }
 
        val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
 }
 
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
+               u16 height)
 {
        u32 val;
-       BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
-       val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
-       dispc_write_reg(DISPC_SIZE_MGR(channel), val);
-}
 
-void dispc_set_digit_size(u16 width, u16 height)
-{
-       u32 val;
-       BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
        val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
-       dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
+       dispc_write_reg(DISPC_SIZE_MGR(channel), val);
 }
 
 static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
 }
 
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
+               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+               bool manual_update)
 {
        /*
         * All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
         * combined fifo size
         */
 
-       if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+       if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
                *fifo_low = ovl_fifo_size - burst_size * 2;
                *fifo_high = total_fifo_size - burst_size;
        } else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
        dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
 }
 
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+               u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
+               bool ilace, enum omap_color_mode color_mode, u8 rotation)
+{
+       int h_accu2_0, h_accu2_1;
+       int v_accu2_0, v_accu2_1;
+       int chroma_hinc, chroma_vinc;
+       int idx;
+
+       struct accu {
+               s8 h0_m, h0_n;
+               s8 h1_m, h1_n;
+               s8 v0_m, v0_n;
+               s8 v1_m, v1_n;
+       };
+
+       const struct accu *accu_table;
+       const struct accu *accu_val;
+
+       static const struct accu accu_nv12[4] = {
+               {  0, 1,  0, 1 , -1, 2, 0, 1 },
+               {  1, 2, -3, 4 ,  0, 1, 0, 1 },
+               { -1, 1,  0, 1 , -1, 2, 0, 1 },
+               { -1, 2, -1, 2 , -1, 1, 0, 1 },
+       };
+
+       static const struct accu accu_nv12_ilace[4] = {
+               {  0, 1,  0, 1 , -3, 4, -1, 4 },
+               { -1, 4, -3, 4 ,  0, 1,  0, 1 },
+               { -1, 1,  0, 1 , -1, 4, -3, 4 },
+               { -3, 4, -3, 4 , -1, 1,  0, 1 },
+       };
+
+       static const struct accu accu_yuv[4] = {
+               {  0, 1, 0, 1,  0, 1, 0, 1 },
+               {  0, 1, 0, 1,  0, 1, 0, 1 },
+               { -1, 1, 0, 1,  0, 1, 0, 1 },
+               {  0, 1, 0, 1, -1, 1, 0, 1 },
+       };
+
+       switch (rotation) {
+       case OMAP_DSS_ROT_0:
+               idx = 0;
+               break;
+       case OMAP_DSS_ROT_90:
+               idx = 1;
+               break;
+       case OMAP_DSS_ROT_180:
+               idx = 2;
+               break;
+       case OMAP_DSS_ROT_270:
+               idx = 3;
+               break;
+       default:
+               BUG();
+               return;
+       }
+
+       switch (color_mode) {
+       case OMAP_DSS_COLOR_NV12:
+               if (ilace)
+                       accu_table = accu_nv12_ilace;
+               else
+                       accu_table = accu_nv12;
+               break;
+       case OMAP_DSS_COLOR_YUV2:
+       case OMAP_DSS_COLOR_UYVY:
+               accu_table = accu_yuv;
+               break;
+       default:
+               BUG();
+               return;
+       }
+
+       accu_val = &accu_table[idx];
+
+       chroma_hinc = 1024 * orig_width / out_width;
+       chroma_vinc = 1024 * orig_height / out_height;
+
+       h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
+       h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
+       v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
+       v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
+
+       dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
+       dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+}
+
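
The chroma accumulators are derived from the table entry for the active rotation as (m * inc / n) mod 1024, where chroma_hinc/chroma_vinc are the UV scaling increments with 1024 meaning one full source pixel per output pixel. A standalone arithmetic sketch of what dispc_ovl_set_accu_uv() computes for one assumed case (NV12, no interlace, OMAP_DSS_ROT_0, 1280x720 scaled to 960x540); the resolutions are illustrative only:

#include <stdio.h>

int main(void)
{
        /* accu_nv12[OMAP_DSS_ROT_0] = { 0, 1,  0, 1, -1, 2, 0, 1 } */
        int h0_m = 0, h0_n = 1, v0_m = -1, v0_n = 2;

        int chroma_hinc = 1024 * 1280 / 960;    /* = 1365 */
        int chroma_vinc = 1024 * 720 / 540;     /* = 1365 */

        /* same integer arithmetic as the driver; C truncates toward zero */
        int h_accu2_0 = (h0_m * chroma_hinc / h0_n) % 1024;     /* = 0 */
        int v_accu2_0 = (v0_m * chroma_vinc / v0_n) % 1024;     /* = -682 */

        printf("h_accu2_0 = %d, v_accu2_0 = %d\n", h_accu2_0, v_accu2_0);
        return 0;
}
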
 static void dispc_ovl_set_scaling_common(enum omap_plane plane,
                u16 orig_width, u16 orig_height,
                u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
                REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
                return;
        }
+
+       dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
+                       out_height, ilace, color_mode, rotation);
+
        switch (color_mode) {
        case OMAP_DSS_COLOR_NV12:
                /* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
                break;
        default:
                BUG();
+               return;
        }
 
        if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
        /* set V scaling */
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
-
-       dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
-       dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
 }
 
 static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
                return 32;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
                return 1 - (-pixels + 1) * ps;
        else
                BUG();
+               return 0;
 }
 
 static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                enum omap_color_mode color_mode, bool fieldmode,
                unsigned int field_offset,
                unsigned *offset0, unsigned *offset1,
-               s32 *row_inc, s32 *pix_inc)
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
        u8 ps;
 
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                else
                        *offset0 = 0;
 
-               *row_inc = pixinc(1 + (screen_width - width) +
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 +
+                       (y_predecim * screen_width - x_predecim * width) +
+                       (fieldmode ? screen_width : 0), ps);
+               *pix_inc = pixinc(x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = field_offset * screen_width * ps;
                else
                        *offset0 = 0;
-               *row_inc = pixinc(1 - (screen_width + width) -
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 -
+                       (y_predecim * screen_width + x_predecim * width) -
+                       (fieldmode ? screen_width : 0), ps);
+               *pix_inc = pixinc(x_predecim, ps);
                break;
 
        default:
                BUG();
+               return;
        }
 }
 
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                enum omap_color_mode color_mode, bool fieldmode,
                unsigned int field_offset,
                unsigned *offset0, unsigned *offset1,
-               s32 *row_inc, s32 *pix_inc)
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
        u8 ps;
        u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(1 + (screen_width - fbw) +
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 +
+                       (y_predecim * screen_width - fbw * x_predecim) +
+                       (fieldmode ? screen_width : 0), ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(x_predecim, ps);
                break;
        case OMAP_DSS_ROT_90:
                *offset1 = screen_width * (fbh - 1) * ps;
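
For the ROT_0 DMA case above, predecimation makes row_inc skip y_predecim - 1 extra lines plus the horizontally dropped pixels, while pix_inc strides x_predecim pixels at a time, both in DISPC's "1 + bytes to skip" convention implemented by pixinc(). A standalone sketch of that arithmetic for one assumed case (ARGB32, screen_width 2048, width 1920, 2x2 predecimation, no fieldmode); pixinc() is copied from this file and the numbers are illustrative only:

#include <stdio.h>

/* same helper as in dispc.c */
static int pixinc(int pixels, int ps)
{
        if (pixels == 1)
                return 1;
        else if (pixels > 1)
                return 1 + (pixels - 1) * ps;
        else    /* pixels < 0; the driver BUG()s on 0 */
                return 1 - (-pixels + 1) * ps;
}

int main(void)
{
        int ps = 4;                             /* bytes per pixel, ARGB32 */
        int screen_width = 2048, width = 1920;
        int x_predecim = 2, y_predecim = 2;

        int row_inc = pixinc(1 + (y_predecim * screen_width -
                                  width * x_predecim), ps);     /* = 1025 */
        int pix_inc = pixinc(x_predecim, ps);                   /* = 5 */

        printf("row_inc = %d, pix_inc = %d\n", row_inc, pix_inc);
        return 0;
}
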
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
-                               (fieldmode ? 1 : 0), ps);
-               *pix_inc = pixinc(-screen_width, ps);
+               *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
+                               y_predecim + (fieldmode ? 1 : 0), ps);
+               *pix_inc = pixinc(-x_predecim * screen_width, ps);
                break;
        case OMAP_DSS_ROT_180:
                *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                else
                        *offset0 = *offset1;
                *row_inc = pixinc(-1 -
-                               (screen_width - fbw) -
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(-1, ps);
+                       (y_predecim * screen_width - fbw * x_predecim) -
+                       (fieldmode ? screen_width : 0), ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(-x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(-x_predecim, ps);
                break;
        case OMAP_DSS_ROT_270:
                *offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
-                               (fieldmode ? 1 : 0), ps);
-               *pix_inc = pixinc(screen_width, ps);
+               *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
+                               y_predecim - (fieldmode ? 1 : 0), ps);
+               *pix_inc = pixinc(x_predecim * screen_width, ps);
                break;
 
        /* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * 2 - 1 +
+               *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
                                (fieldmode ? screen_width : 0),
                                ps);
-               *pix_inc = pixinc(-1, ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(-x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(-x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
-                               (fieldmode ? 1 : 0),
+               *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
+                               y_predecim + (fieldmode ? 1 : 0),
                                ps);
-               *pix_inc = pixinc(screen_width, ps);
+               *pix_inc = pixinc(x_predecim * screen_width, ps);
                break;
 
        case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(1 - screen_width * 2 -
+               *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
                                (fieldmode ? screen_width : 0),
                                ps);
-               *pix_inc = pixinc(1, ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
-                               (fieldmode ? 1 : 0),
+               *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+                               y_predecim - (fieldmode ? 1 : 0),
                                ps);
-               *pix_inc = pixinc(-screen_width, ps);
+               *pix_inc = pixinc(-x_predecim * screen_width, ps);
                break;
 
        default:
                BUG();
+               return;
        }
 }
 
-static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+               enum omap_color_mode color_mode, bool fieldmode,
+               unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+       u8 ps;
+
+       switch (color_mode) {
+       case OMAP_DSS_COLOR_CLUT1:
+       case OMAP_DSS_COLOR_CLUT2:
+       case OMAP_DSS_COLOR_CLUT4:
+       case OMAP_DSS_COLOR_CLUT8:
+               BUG();
+               return;
+       default:
+               ps = color_mode_to_bpp(color_mode) / 8;
+               break;
+       }
+
+       DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+       /*
+        * field 0 = even field = bottom field
+        * field 1 = odd field = top field
+        */
+       *offset1 = 0;
+       if (field_offset)
+               *offset0 = *offset1 + field_offset * screen_width * ps;
+       else
+               *offset0 = *offset1;
+       *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+                       (fieldmode ? screen_width : 0), ps);
+       if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+               color_mode == OMAP_DSS_COLOR_UYVY)
+               *pix_inc = pixinc(x_predecim, 2 * ps);
+       else
+               *pix_inc = pixinc(x_predecim, ps);
+}
+
+/*
+ * This function is used to avoid synclosts in OMAP3, because of some
+ * undocumented horizontal position and timing related limitations.
+ */
+static int check_horiz_timing_omap3(enum omap_channel channel,
+               const struct omap_video_timings *t, u16 pos_x,
+               u16 width, u16 height, u16 out_width, u16 out_height)
+{
+       int DS = DIV_ROUND_UP(height, out_height);
+       unsigned long nonactive, lclk, pclk;
+       static const u8 limits[3] = { 8, 10, 20 };
+       u64 val, blank;
+       int i;
+
+       nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+       pclk = dispc_mgr_pclk_rate(channel);
+       if (dispc_mgr_is_lcd(channel))
+               lclk = dispc_mgr_lclk_rate(channel);
+       else
+               lclk = dispc_fclk_rate();
+
+       i = 0;
+       if (out_height < height)
+               i++;
+       if (out_width < width)
+               i++;
+       blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+       DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+       if (blank <= limits[i])
+               return -EINVAL;
+
+       /*
+        * Pixel data should be prepared before the visible display point
+        * starts. So, at least DS-2 lines must already have been fetched by
+        * DISPC during the nonactive - pos_x period.
+        */
+       val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+       DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+               val, max(0, DS - 2) * width);
+       if (val < max(0, DS - 2) * width)
+               return -EINVAL;
+
+       /*
+        * All lines need to be refilled during the nonactive period, as only
+        * one line can be loaded during the active period. So, at least
+        * DS - 1 lines should be loaded during the nonactive period.
+        */
+       val =  div_u64((u64)nonactive * lclk, pclk);
+       DSSDBG("nonactive * pcd  = %llu, max(0, DS - 1) * width = %d\n",
+               val, max(0, DS - 1) * width);
+       if (val < max(0, DS - 1) * width)
+               return -EINVAL;
+
+       return 0;
+}
+
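
The checks above scale the horizontal blanking and the nonactive region from pixel clock cycles to lclk cycles and compare the result against the number of input pixels that have to be fetched for DS-tap vertical downscaling. A standalone arithmetic sketch for one assumed case (1024x768 LCD timings, pclk 65 MHz, lclk 130 MHz, an overlay at pos_x 0 downscaled from 1536 to 768 lines); the timings and clock rates are illustrative only:

#include <stdio.h>

int main(void)
{
        /* assumed LCD timings and clocks */
        int x_res = 1024, hfp = 24, hsw = 136, hbp = 160;
        long long pclk = 65000000, lclk = 130000000;

        /* overlay: 1024x1536 scaled to 1024x768 at pos_x = 0 */
        int width = 1024, height = 1536, out_width = 1024, out_height = 768;
        int pos_x = 0;

        int DS = (height + out_height - 1) / out_height;        /* = 2 */
        int nonactive = x_res + hfp + hsw + hbp - out_width;    /* = 320 */

        long long blank    = (hbp + hsw + hfp) * lclk / pclk;   /* = 640 */
        long long prefetch = (long long)(nonactive - pos_x) * lclk / pclk;
        long long refill   = (long long)nonactive * lclk / pclk;

        printf("blank = %lld (limit 10)\n", blank);
        printf("prefetch = %lld, need >= %d\n", prefetch, (DS - 2) * width);
        printf("refill = %lld, need >= %d -> %s\n", refill, (DS - 1) * width,
               refill >= (DS - 1) * width ? "ok" : "-EINVAL, decimate more");
        return 0;
}
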
+static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
+               const struct omap_video_timings *mgr_timings, u16 width,
                u16 height, u16 out_width, u16 out_height,
                enum omap_color_mode color_mode)
 {
-       u32 fclk = 0;
+       u32 core_clk = 0;
        u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
 
        if (height <= out_height && width <= out_width)
                return (unsigned long) pclk;
 
        if (height > out_height) {
-               struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
-               unsigned int ppl = dssdev->panel.timings.x_res;
+               unsigned int ppl = mgr_timings->x_res;
 
                tmp = pclk * height * out_width;
                do_div(tmp, 2 * out_height * ppl);
-               fclk = tmp;
+               core_clk = tmp;
 
                if (height > 2 * out_height) {
                        if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
 
                        tmp = pclk * (height - 2 * out_height) * out_width;
                        do_div(tmp, 2 * out_height * (ppl - out_width));
-                       fclk = max(fclk, (u32) tmp);
+                       core_clk = max_t(u32, core_clk, tmp);
                }
        }
 
        if (width > out_width) {
                tmp = pclk * width;
                do_div(tmp, out_width);
-               fclk = max(fclk, (u32) tmp);
+               core_clk = max_t(u32, core_clk, tmp);
 
                if (color_mode == OMAP_DSS_COLOR_RGB24U)
-                       fclk <<= 1;
+                       core_clk <<= 1;
        }
 
-       return fclk;
+       return core_clk;
 }
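
For the five-tap case the first term above works out to core_clk = pclk * height * out_width / (2 * out_height * ppl). With assumed numbers (pclk 74.25 MHz, a 1920-wide manager, an overlay scaled from 1920x1080 to 1920x540) that is 74.25 MHz * 1080 * 1920 / (2 * 540 * 1920) = 74.25 MHz, i.e. a plain 2x vertical downscale already demands a core clock equal to the pixel clock before the width term is taken into account.
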
 
-static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
                u16 height, u16 out_width, u16 out_height)
 {
        unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
 }
 
 static int dispc_ovl_calc_scaling(enum omap_plane plane,
-               enum omap_channel channel, u16 width, u16 height,
-               u16 out_width, u16 out_height,
-               enum omap_color_mode color_mode, bool *five_taps)
+               enum omap_channel channel,
+               const struct omap_video_timings *mgr_timings,
+               u16 width, u16 height, u16 out_width, u16 out_height,
+               enum omap_color_mode color_mode, bool *five_taps,
+               int *x_predecim, int *y_predecim, u16 pos_x)
 {
        struct omap_overlay *ovl = omap_dss_get_overlay(plane);
        const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
        const int maxsinglelinewidth =
                                dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
-       unsigned long fclk = 0;
+       const int max_decim_limit = 16;
+       unsigned long core_clk = 0;
+       int decim_x, decim_y, error, min_factor;
+       u16 in_width, in_height, in_width_max = 0;
 
        if (width == out_width && height == out_height)
                return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
        if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
                return -EINVAL;
 
-       if (out_width < width / maxdownscale ||
-                       out_width > width * 8)
+       *x_predecim = max_decim_limit;
+       *y_predecim = max_decim_limit;
+
+       if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+           color_mode == OMAP_DSS_COLOR_CLUT2 ||
+           color_mode == OMAP_DSS_COLOR_CLUT4 ||
+           color_mode == OMAP_DSS_COLOR_CLUT8) {
+               *x_predecim = 1;
+               *y_predecim = 1;
+               *five_taps = false;
+               return 0;
+       }
+
+       decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
+       decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+
+       min_factor = min(decim_x, decim_y);
+
+       if (decim_x > *x_predecim || out_width > width * 8)
                return -EINVAL;
 
-       if (out_height < height / maxdownscale ||
-                       out_height > height * 8)
+       if (decim_y > *y_predecim || out_height > height * 8)
                return -EINVAL;
 
        if (cpu_is_omap24xx()) {
-               if (width > maxsinglelinewidth)
-                       DSSERR("Cannot scale max input width exceeded");
                *five_taps = false;
-               fclk = calc_fclk(channel, width, height, out_width,
-                                                               out_height);
+
+               do {
+                       in_height = DIV_ROUND_UP(height, decim_y);
+                       in_width = DIV_ROUND_UP(width, decim_x);
+                       core_clk = calc_core_clk(channel, in_width, in_height,
+                                       out_width, out_height);
+                       error = (in_width > maxsinglelinewidth || !core_clk ||
+                               core_clk > dispc_core_clk_rate());
+                       if (error) {
+                               if (decim_x == decim_y) {
+                                       decim_x = min_factor;
+                                       decim_y++;
+                               } else {
+                                       swap(decim_x, decim_y);
+                                       if (decim_x < decim_y)
+                                               decim_x++;
+                               }
+                       }
+               } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
+                               error);
+
+               if (in_width > maxsinglelinewidth) {
+                       DSSERR("Cannot scale max input width exceeded");
+                       return -EINVAL;
+               }
        } else if (cpu_is_omap34xx()) {
-               if (width > (maxsinglelinewidth * 2)) {
+
+               do {
+                       in_height = DIV_ROUND_UP(height, decim_y);
+                       in_width = DIV_ROUND_UP(width, decim_x);
+                       core_clk = calc_core_clk_five_taps(channel, mgr_timings,
+                               in_width, in_height, out_width, out_height,
+                               color_mode);
+
+                       error = check_horiz_timing_omap3(channel, mgr_timings,
+                               pos_x, in_width, in_height, out_width,
+                               out_height);
+
+                       if (in_width > maxsinglelinewidth)
+                               if (in_height > out_height &&
+                                       in_height < out_height * 2)
+                                       *five_taps = false;
+                       if (!*five_taps)
+                               core_clk = calc_core_clk(channel, in_width,
+                                       in_height, out_width, out_height);
+                       error = (error || in_width > maxsinglelinewidth * 2 ||
+                               (in_width > maxsinglelinewidth && *five_taps) ||
+                               !core_clk || core_clk > dispc_core_clk_rate());
+                       if (error) {
+                               if (decim_x == decim_y) {
+                                       decim_x = min_factor;
+                                       decim_y++;
+                               } else {
+                                       swap(decim_x, decim_y);
+                                       if (decim_x < decim_y)
+                                               decim_x++;
+                               }
+                       }
+               } while (decim_x <= *x_predecim && decim_y <= *y_predecim
+                       && error);
+
+               if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
+                       height, out_width, out_height)) {
+                               DSSERR("horizontal timing too tight\n");
+                               return -EINVAL;
+               }
+
+               if (in_width > (maxsinglelinewidth * 2)) {
                        DSSERR("Cannot setup scaling");
                        DSSERR("width exceeds maximum width possible");
                        return -EINVAL;
                }
-               fclk = calc_fclk_five_taps(channel, width, height, out_width,
-                                               out_height, color_mode);
-               if (width > maxsinglelinewidth) {
-                       if (height > out_height && height < out_height * 2)
-                               *five_taps = false;
-                       else {
-                               DSSERR("cannot setup scaling with five taps");
-                               return -EINVAL;
-                       }
+
+               if (in_width > maxsinglelinewidth && *five_taps) {
+                       DSSERR("cannot setup scaling with five taps");
+                       return -EINVAL;
                }
-               if (!*five_taps)
-                       fclk = calc_fclk(channel, width, height, out_width,
-                                       out_height);
        } else {
-               if (width > maxsinglelinewidth) {
+               int decim_x_min = decim_x;
+               in_height = DIV_ROUND_UP(height, decim_y);
+               in_width_max = dispc_core_clk_rate() /
+                               DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
+                                               out_width);
+               decim_x = DIV_ROUND_UP(width, in_width_max);
+
+               decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
+               if (decim_x > *x_predecim)
+                       return -EINVAL;
+
+               do {
+                       in_width = DIV_ROUND_UP(width, decim_x);
+               } while (decim_x <= *x_predecim &&
+                               in_width > maxsinglelinewidth && decim_x++);
+
+               if (in_width > maxsinglelinewidth) {
                        DSSERR("Cannot scale width exceeds max line width");
                        return -EINVAL;
                }
-               fclk = calc_fclk(channel, width, height, out_width,
-                               out_height);
+
+               core_clk = calc_core_clk(channel, in_width, in_height,
+                               out_width, out_height);
        }
 
-       DSSDBG("required fclk rate = %lu Hz\n", fclk);
-       DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+       DSSDBG("required core clk rate = %lu Hz\n", core_clk);
+       DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
 
-       if (!fclk || fclk > dispc_fclk_rate()) {
+       if (!core_clk || core_clk > dispc_core_clk_rate()) {
                DSSERR("failed to set up scaling, "
-                       "required fclk rate = %lu Hz, "
-                       "current fclk rate = %lu Hz\n",
-                       fclk, dispc_fclk_rate());
+                       "required core clk rate = %lu Hz, "
+                       "current core clk rate = %lu Hz\n",
+                       core_clk, dispc_core_clk_rate());
                return -EINVAL;
        }
 
+       *x_predecim = decim_x;
+       *y_predecim = decim_y;
        return 0;
 }
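
Both the OMAP2 and OMAP3 branches above walk the same (decim_x, decim_y) sequence whenever a candidate fails: reset x to the smaller initial factor and bump y when the pair is on the diagonal, otherwise mirror the pair and step the smaller axis. A standalone sketch that replays that walk assuming every candidate fails, just to show the order in which predecimation factors are tried; the limits of 4 keep the output short and are not the driver's real limits:

#include <stdio.h>

int main(void)
{
        int decim_x = 1, decim_y = 1;   /* initial factors, min_factor = 1 */
        int min_factor = 1, x_max = 4, y_max = 4;

        do {
                printf("(%d,%d) ", decim_x, decim_y);
                /* pretend this candidate failed; step like the driver does */
                if (decim_x == decim_y) {
                        decim_x = min_factor;
                        decim_y++;
                } else {
                        int tmp = decim_x;

                        decim_x = decim_y;
                        decim_y = tmp;
                        if (decim_x < decim_y)
                                decim_x++;
                }
        } while (decim_x <= x_max && decim_y <= y_max);

        printf("\n");   /* (1,1) (1,2) (2,1) (2,2) (1,3) (3,1) (2,3) ... */
        return 0;
}
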
 
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-               bool ilace, bool replication)
+               bool ilace, bool replication,
+               const struct omap_video_timings *mgr_timings)
 {
        struct omap_overlay *ovl = omap_dss_get_overlay(plane);
        bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        s32 pix_inc;
        u16 frame_height = oi->height;
        unsigned int field_offset = 0;
-       u16 outw, outh;
+       u16 in_height = oi->height;
+       u16 in_width = oi->width;
+       u16 out_width, out_height;
        enum omap_channel channel;
+       int x_predecim = 1, y_predecim = 1;
 
        channel = dispc_ovl_get_channel_out(plane);
 
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        if (oi->paddr == 0)
                return -EINVAL;
 
-       outw = oi->out_width == 0 ? oi->width : oi->out_width;
-       outh = oi->out_height == 0 ? oi->height : oi->out_height;
+       out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+       out_height = oi->out_height == 0 ? oi->height : oi->out_height;
 
-       if (ilace && oi->height == outh)
+       if (ilace && oi->height == out_height)
                fieldmode = 1;
 
        if (ilace) {
                if (fieldmode)
-                       oi->height /= 2;
+                       in_height /= 2;
                oi->pos_y /= 2;
-               outh /= 2;
+               out_height /= 2;
 
                DSSDBG("adjusting for ilace: height %d, pos_y %d, "
                                "out_height %d\n",
-                               oi->height, oi->pos_y, outh);
+                               in_height, oi->pos_y, out_height);
        }
 
        if (!dss_feat_color_mode_supported(plane, oi->color_mode))
                return -EINVAL;
 
-       r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
-                       outw, outh, oi->color_mode,
-                       &five_taps);
+       r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
+                       in_height, out_width, out_height, oi->color_mode,
+                       &five_taps, &x_predecim, &y_predecim, oi->pos_x);
        if (r)
                return r;
 
+       in_width = DIV_ROUND_UP(in_width, x_predecim);
+       in_height = DIV_ROUND_UP(in_height, y_predecim);
+
        if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
                        oi->color_mode == OMAP_DSS_COLOR_UYVY ||
                        oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
                 * so the integer part must be added to the base address of the
                 * bottom field.
                 */
-               if (!oi->height || oi->height == outh)
+               if (!in_height || in_height == out_height)
                        field_offset = 0;
                else
-                       field_offset = oi->height / outh / 2;
+                       field_offset = in_height / out_height / 2;
        }
 
        /* Fields are independent but interleaved in memory. */
        if (fieldmode)
                field_offset = 1;
 
-       if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+       offset0 = 0;
+       offset1 = 0;
+       row_inc = 0;
+       pix_inc = 0;
+
+       if (oi->rotation_type == OMAP_DSS_ROT_TILER)
+               calc_tiler_rotation_offset(oi->screen_width, in_width,
+                               oi->color_mode, fieldmode, field_offset,
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
+       else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
                calc_dma_rotation_offset(oi->rotation, oi->mirror,
-                               oi->screen_width, oi->width, frame_height,
+                               oi->screen_width, in_width, frame_height,
                                oi->color_mode, fieldmode, field_offset,
-                               &offset0, &offset1, &row_inc, &pix_inc);
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
        else
                calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
-                               oi->screen_width, oi->width, frame_height,
+                               oi->screen_width, in_width, frame_height,
                                oi->color_mode, fieldmode, field_offset,
-                               &offset0, &offset1, &row_inc, &pix_inc);
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
 
        DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
                        offset0, offset1, row_inc, pix_inc);
 
        dispc_ovl_set_color_mode(plane, oi->color_mode);
 
+       dispc_ovl_configure_burst_type(plane, oi->rotation_type);
+
        dispc_ovl_set_ba0(plane, oi->paddr + offset0);
        dispc_ovl_set_ba1(plane, oi->paddr + offset1);
 
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        dispc_ovl_set_row_inc(plane, row_inc);
        dispc_ovl_set_pix_inc(plane, pix_inc);
 
-       DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
-                       oi->height, outw, outh);
+       DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
+                       in_height, out_width, out_height);
 
        dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
 
-       dispc_ovl_set_pic_size(plane, oi->width, oi->height);
+       dispc_ovl_set_pic_size(plane, in_width, in_height);
 
        if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
-               dispc_ovl_set_scaling(plane, oi->width, oi->height,
-                                  outw, outh,
-                                  ilace, five_taps, fieldmode,
+               dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
+                                  out_height, ilace, five_taps, fieldmode,
                                   oi->color_mode, oi->rotation);
-               dispc_ovl_set_vid_size(plane, outw, outh);
+               dispc_ovl_set_vid_size(plane, out_width, out_height);
                dispc_ovl_set_vid_color_conv(plane, cconv);
        }
 
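Two details of the reworked dispc_ovl_setup() in the hunks above are worth a quick numeric check. First, the decimation factors returned by dispc_ovl_calc_scaling() shrink the effective input size with DIV_ROUND_UP. Second, for interlaced output the bottom-field offset is derived from the effective heights as field_offset = in_height / out_height / 2 (integer division): made-up values of 720 and 240 give 720 / 240 / 2 = 1, while equal heights take the earlier branch and give 0. A minimal user-space sketch of the rounding step; the DIV_ROUND_UP definition mirrors the kernel macro, all numbers are made up:

#include <stdio.h>

/* same rounding as the kernel's DIV_ROUND_UP macro */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int in_width = 1920, in_height = 1081;	/* hypothetical overlay input */
	int x_predecim = 2, y_predecim = 4;		/* factors picked by the scaler */

	/* the overlay then fetches only every Nth pixel/line */
	printf("%ux%u -> %ux%u\n", in_width, in_height,
	       DIV_ROUND_UP(in_width, x_predecim),
	       DIV_ROUND_UP(in_height, y_predecim));	/* prints 1920x1081 -> 960x271 */
	return 0;
}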
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
                return !!REG_GET(DISPC_CONTROL, 1, 1);
        else if (channel == OMAP_DSS_CHANNEL_LCD2)
                return !!REG_GET(DISPC_CONTROL2, 0, 0);
-       else
+       else {
                BUG();
+               return false;
+       }
 }
 
 void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
                REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
 }
 
+static bool _dispc_mgr_size_ok(u16 width, u16 height)
+{
+       return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
+               height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+}
+
 static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
                int vsw, int vfp, int vbp)
 {
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
        return true;
 }
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+               const struct omap_video_timings *timings)
 {
-       return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-                       timings->hbp, timings->vsw,
-                       timings->vfp, timings->vbp);
+       bool timings_ok;
+
+       timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+
+       if (dispc_mgr_is_lcd(channel))
+               timings_ok =  timings_ok && _dispc_lcd_timings_ok(timings->hsw,
+                                               timings->hfp, timings->hbp,
+                                               timings->vsw, timings->vfp,
+                                               timings->vbp);
+
+       return timings_ok;
 }
 
 static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
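The new dispc_mgr_timings_ok() folds two independent checks together: an x/y size limit that applies to every channel, and blanking-interval limits that only make sense for LCD channels. A stand-alone model of that structure; the numeric limits below are placeholders, not what dss_feat_get_param_max() or _dispc_lcd_timings_ok() actually enforce:

#include <stdbool.h>
#include <stdio.h>

struct timings { unsigned x_res, y_res, hsw, vsw; };

static bool size_ok(const struct timings *t)
{
	return t->x_res <= 2048 && t->y_res <= 2048;	/* placeholder limits */
}

static bool lcd_blanking_ok(const struct timings *t)
{
	return t->hsw >= 1 && t->hsw <= 256 &&
	       t->vsw >= 1 && t->vsw <= 256;		/* placeholder limits */
}

static bool timings_ok(bool is_lcd, const struct timings *t)
{
	bool ok = size_ok(t);				/* checked for LCD and TV alike */

	if (is_lcd)
		ok = ok && lcd_blanking_ok(t);		/* extra LCD-only constraints */
	return ok;
}

int main(void)
{
	struct timings t = { 1280, 720, 40, 5 };
	printf("lcd: %d, tv: %d\n", timings_ok(true, &t), timings_ok(false, &t));
	return 0;
}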
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
 }
 
 /* change name to mode? */
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
                struct omap_video_timings *timings)
 {
        unsigned xtot, ytot;
        unsigned long ht, vt;
+       struct omap_video_timings t = *timings;
+
+       DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
 
-       if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-                               timings->hbp, timings->vsw,
-                               timings->vfp, timings->vbp))
+       if (!dispc_mgr_timings_ok(channel, &t)) {
                BUG();
+               return;
+       }
+
+       if (dispc_mgr_is_lcd(channel)) {
+               _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
+                               t.vfp, t.vbp);
+
+               xtot = t.x_res + t.hfp + t.hsw + t.hbp;
+               ytot = t.y_res + t.vfp + t.vsw + t.vbp;
 
-       _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
-                       timings->hbp, timings->vsw, timings->vfp,
-                       timings->vbp);
+               ht = (timings->pixel_clock * 1000) / xtot;
+               vt = (timings->pixel_clock * 1000) / xtot / ytot;
 
-       dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
+               DSSDBG("pck %u\n", timings->pixel_clock);
+               DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+                       t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
 
-       xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
-       ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+               DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+       } else {
+               enum dss_hdmi_venc_clk_source_select source;
 
-       ht = (timings->pixel_clock * 1000) / xtot;
-       vt = (timings->pixel_clock * 1000) / xtot / ytot;
+               source = dss_get_hdmi_venc_clk_source();
 
-       DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
-                       timings->y_res);
-       DSSDBG("pck %u\n", timings->pixel_clock);
-       DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
-                       timings->hsw, timings->hfp, timings->hbp,
-                       timings->vsw, timings->vfp, timings->vbp);
+               if (source == DSS_VENC_TV_CLK)
+                       t.y_res /= 2;
+       }
 
-       DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+       dispc_mgr_set_size(channel, t.x_res, t.y_res);
 }
 
 static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
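The debug output in the reworked dispc_mgr_set_timings() computes the line and frame rates from the blanking totals. With made-up 720p-class numbers: xtot = 1280 + 110 + 40 + 220 = 1650 and ytot = 720 + 5 + 5 + 20 = 750, so a pixel_clock of 74250 (kHz) gives a line rate of 74,250,000 / 1650 = 45,000 Hz and a frame rate of 45,000 / 750 = 60 Hz. On the non-LCD path, y_res is halved when the VENC clock is the selected source before the manager size is programmed.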
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
                break;
        default:
                BUG();
+               return 0;
        }
 
        return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
                break;
        default:
                BUG();
+               return 0;
        }
 
        return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
 
                return r / pcd;
        } else {
-               struct omap_dss_device *dssdev =
-                       dispc_mgr_get_device(channel);
+               enum dss_hdmi_venc_clk_source_select source;
 
-               switch (dssdev->type) {
-               case OMAP_DISPLAY_TYPE_VENC:
+               source = dss_get_hdmi_venc_clk_source();
+
+               switch (source) {
+               case DSS_VENC_TV_CLK:
                        return venc_get_pixel_clock();
-               case OMAP_DISPLAY_TYPE_HDMI:
+               case DSS_HDMI_M_PCLK:
                        return hdmi_get_pixel_clock();
                default:
                        BUG();
+                       return 0;
                }
        }
 }
 
+unsigned long dispc_core_clk_rate(void)
+{
+       int lcd;
+       unsigned long fclk = dispc_fclk_rate();
+
+       if (dss_has_feature(FEAT_CORE_CLK_DIV))
+               lcd = REG_GET(DISPC_DIVISOR, 23, 16);
+       else
+               lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
+
+       return fclk / lcd;
+}
+
 void dispc_dump_clocks(struct seq_file *s)
 {
        int lcd, pcd;
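The new dispc_core_clk_rate() simply divides the DISPC functional clock by the LCD divisor field (bits 23:16 of DISPC_DIVISOR, or of DISPC_DIVISORo for the LCD channel when FEAT_CORE_CLK_DIV is absent). With made-up numbers, fclk = 153,600,000 Hz and a divisor field of 2 give a core clock of 76,800,000 Hz.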
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
 }
 #endif
 
-void dispc_dump_regs(struct seq_file *s)
+static void dispc_dump_regs(struct seq_file *s)
 {
        int i, j;
        const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
        return 0;
 }
 
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
-{
-       u32 irqstatus = DISPC_IRQ_VSYNC;
-       int i;
-
-       WARN_ON(!in_interrupt());
-
-       for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-               struct omap_dispc_isr_data *isr_data;
-               isr_data = &dispc.registered_isr[i];
-
-               if (!isr_data->isr)
-                       continue;
-
-               if (isr_data->mask & irqstatus)
-                       isr_data->isr(isr_data->arg, irqstatus);
-       }
-}
-#endif
-
 static void _omap_dispc_initialize_irq(void)
 {
        unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
 }
 
 /* DISPC HW IP initialisation */
-static int omap_dispchw_probe(struct platform_device *pdev)
+static int __init omap_dispchw_probe(struct platform_device *pdev)
 {
        u32 rev;
        int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
 
        dispc_runtime_put();
 
+       dss_debugfs_create_file("dispc", dispc_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+       dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
        return 0;
 
 err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
        return r;
 }
 
-static int omap_dispchw_remove(struct platform_device *pdev)
+static int __exit omap_dispchw_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
 
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
 static int dispc_runtime_suspend(struct device *dev)
 {
        dispc_save_context();
-       dss_runtime_put();
 
        return 0;
 }
 
 static int dispc_runtime_resume(struct device *dev)
 {
-       int r;
-
-       r = dss_runtime_get();
-       if (r < 0)
-               return r;
-
        dispc_restore_context();
 
        return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
 };
 
 static struct platform_driver omap_dispchw_driver = {
-       .probe          = omap_dispchw_probe,
-       .remove         = omap_dispchw_remove,
+       .remove         = __exit_p(omap_dispchw_remove),
        .driver         = {
                .name   = "omapdss_dispc",
                .owner  = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
        },
 };
 
-int dispc_init_platform_driver(void)
+int __init dispc_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dispchw_driver);
+       return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
 }
 
-void dispc_uninit_platform_driver(void)
+void __exit dispc_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dispchw_driver);
+       platform_driver_unregister(&omap_dispchw_driver);
 }
index 5836bd1650f9a93c53430734361065c47fef4c26..f278080e1063f2a92b102b0157ab80396bfe095d 100644 (file)
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
                return 0x03AC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
                return 0x03B0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
                return 0x0064;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0400;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
                return 0x0068;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0404;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
                return 0x006C;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0408;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
                return 0x0070;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x040C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
                return 0x03CC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
                return 0x01D4;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
                return 0x01D8;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
                return 0x01DC;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
                return 0x0220;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03BC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
                return 0x0224;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03B8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
                return 0x0228;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03B4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
                return 0x0300;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
                return 0x0008;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
                return 0x000C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0544;
        case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
                return 0x0310;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0548;
        case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
                return 0x0314;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
                return 0x009C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
                return 0x00A8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
                return 0x0070;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0568;
        case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
                return 0x032C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
                return 0x008C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
                return 0x0088;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
                return 0x00A4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
                return 0x0098;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                BUG();
+               return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                BUG();
+               return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
                return 0x0090;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0580;
        case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
                return 0x0424;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
                return 0x0094;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
                return 0x0000;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0584;
        case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
                return 0x0428;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
                return 0x0004;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0588;
        case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
                return 0x042C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
                return 0x0010 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x058C + i * 0x8;
        case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0430 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
                return 0x0014 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0590 + i * 8;
        case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0434 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                return 0x0074 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0124 + i * 0x4;
        case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
                return 0x0050 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x05CC + i * 0x4;
        case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0470 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
                return 0x00A0;
        default:
                BUG();
+               return 0;
        }
 }
 #endif
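Almost every hunk in this header follows the same pattern: a "return 0;" is added after BUG() in the invalid/default switch cases. A small stand-alone illustration of why the unreachable return matters; the BUG() below is a local stand-in, not the kernel macro, which is not guaranteed to be noreturn in every configuration:

#include <stdio.h>
#include <stdlib.h>

/* local stand-in for the kernel macro; abort() does stop here, but the real
 * BUG() may compile to almost nothing when CONFIG_BUG is disabled */
#define BUG() do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
		   abort(); } while (0)

enum chan { CHAN_LCD, CHAN_DIGIT, CHAN_LCD2 };

static unsigned short reg_offset(enum chan c)
{
	switch (c) {
	case CHAN_LCD:
		return 0x0064;
	case CHAN_LCD2:
		return 0x0400;
	default:
		BUG();
		return 0;	/* keeps the function well-defined (and the
				 * compiler quiet) if BUG() ever falls through */
	}
}

int main(void)
{
	printf("0x%04x\n", reg_offset(CHAN_LCD));
	return 0;
}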
index 4424c198dbcda6c3a34fa53189ecd28a5a446388..24901063037024f25bae5ee1ba44ec3fe074001a 100644 (file)
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
                return 24;
        default:
                BUG();
+               return 0;
        }
 }
 EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
 
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
 /* Checks if replication logic should be used. Only use for active matrix,
  * when overlay is in RGB12U or RGB16 mode, and LCD interface is
  * 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
                break;
        default:
                BUG();
+               return false;
        }
 
        return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
        int i;
        int r;
 
-       switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
-       case OMAP_DISPLAY_TYPE_DPI:
-               r = dpi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
-       case OMAP_DISPLAY_TYPE_DBI:
-               r = rfbi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-       case OMAP_DISPLAY_TYPE_VENC:
-               r = venc_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
-       case OMAP_DISPLAY_TYPE_SDI:
-               r = sdi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-       case OMAP_DISPLAY_TYPE_DSI:
-               r = dsi_init_display(dssdev);
-               break;
-#endif
-       case OMAP_DISPLAY_TYPE_HDMI:
-               r = hdmi_init_display(dssdev);
-               break;
-       default:
-               DSSERR("Support for display '%s' not compiled in.\n",
-                               dssdev->name);
-               return;
-       }
-
-       if (r) {
-               DSSERR("failed to init display %s\n", dssdev->name);
-               return;
-       }
-
        /* create device sysfs files */
        i = 0;
        while ((attr = display_sysfs_attrs[i++]) != NULL) {
index faaf305fda279615a6748ff0cfbbe78e315049a2..8c2056c9537bd1162d08dd0c6d63a2437b926c45 100644 (file)
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
                t->pixel_clock = pck;
        }
 
-       dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+       dss_mgr_set_timings(dssdev->manager, t);
 
        return 0;
 }
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
                        goto err_reg_enable;
        }
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
                goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
 err_get_dsi:
        dispc_runtime_put();
 err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
        if (cpu_is_omap34xx())
                regulator_disable(dpi.vdds_dsi_reg);
 err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
        }
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        if (cpu_is_omap34xx())
                regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
        DSSDBG("dpi_set_timings\n");
        dssdev->panel.timings = *timings;
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
-               r = dss_runtime_get();
-               if (r)
-                       return;
-
                r = dispc_runtime_get();
-               if (r) {
-                       dss_runtime_put();
+               if (r)
                        return;
-               }
 
                dpi_set_mode(dssdev);
-               dispc_mgr_go(dssdev->manager->id);
 
                dispc_runtime_put();
-               dss_runtime_put();
+       } else {
+               dss_mgr_set_timings(dssdev->manager, timings);
        }
 }
 EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
        unsigned long pck;
        struct dispc_clock_info dispc_cinfo;
 
-       if (!dispc_lcd_timings_ok(timings))
+       if (dss_mgr_check_timings(dssdev->manager, timings))
                return -EINVAL;
 
        if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(dpi_check_timings);
 
-int dpi_init_display(struct omap_dss_device *dssdev)
+static int __init dpi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-int dpi_init(void)
+static void __init dpi_probe_pdata(struct platform_device *pdev)
 {
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
+                       continue;
+
+               r = dpi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
+static int __init omap_dpi_probe(struct platform_device *pdev)
+{
+       dpi_probe_pdata(pdev);
+
+       return 0;
+}
+
+static int __exit omap_dpi_remove(struct platform_device *pdev)
+{
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        return 0;
 }
 
-void dpi_exit(void)
+static struct platform_driver omap_dpi_driver = {
+       .remove         = __exit_p(omap_dpi_remove),
+       .driver         = {
+               .name   = "omapdss_dpi",
+               .owner  = THIS_MODULE,
+       },
+};
+
+int __init dpi_init_platform_driver(void)
 {
+       return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
 }
 
+void __exit dpi_uninit_platform_driver(void)
+{
+       platform_driver_unregister(&omap_dpi_driver);
+}
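The DPI code above moves from an explicit init/exit pair to a platform driver whose probe walks the board data and registers each DPI display. Because registration goes through platform_driver_probe() rather than platform_driver_register(), the probe path can be marked __init and discarded after boot; only .remove stays in the driver ops. A bare-bones sketch of that registration pattern; the names are illustrative, not from the patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* one-time, boot-only setup would go here */
	return 0;
}

static int __exit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	/* no .probe: it is passed to platform_driver_probe() instead */
	.remove		= __exit_p(example_remove),
	.driver		= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	/* binds once at boot; the probe code can then be freed */
	return platform_driver_probe(&example_driver, example_probe);
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");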
index 210a3c4f615012662769010e37d6cd54e28e61e3..14ce8cc079e3d1840e982734b4e2644770ba9b72 100644 (file)
@@ -256,14 +256,13 @@ struct dsi_data {
        struct platform_device *pdev;
        void __iomem    *base;
 
+       int module_id;
+
        int irq;
 
        struct clk *dss_clk;
        struct clk *sys_clk;
 
-       int (*enable_pads)(int dsi_id, unsigned lane_mask);
-       void (*disable_pads)(int dsi_id, unsigned lane_mask);
-
        struct dsi_clock_info current_cinfo;
 
        bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
        return dsi_pdev_map[module];
 }
 
-static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
-{
-       return dsidev->id;
-}
-
 static inline void dsi_write_reg(struct platform_device *dsidev,
                const struct dsi_reg idx, u32 val)
 {
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
                return 16;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -1080,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
        DSSDBG("dsi_runtime_put\n");
 
        r = pm_runtime_put_sync(&dsi->pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 /* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
 {
        unsigned long r;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
-       if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
+       if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
                /* DSI FCLK source is DSS_CLK_FCK */
                r = clk_get_rate(dsi->dss_clk);
        } else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
 }
 
 /* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+static int dsi_calc_clock_rates(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
        if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
        if (cinfo->regm_dsi > dsi->regm_dsi_max)
                return -EINVAL;
 
-       if (cinfo->use_sys_clk) {
-               cinfo->clkin = clk_get_rate(dsi->sys_clk);
-               /* XXX it is unclear if highfreq should be used
-                * with DSS_SYS_CLK source also */
-               cinfo->highfreq = 0;
-       } else {
-               cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
-
-               if (cinfo->clkin < 32000000)
-                       cinfo->highfreq = 0;
-               else
-                       cinfo->highfreq = 1;
-       }
-
-       cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+       cinfo->clkin = clk_get_rate(dsi->sys_clk);
+       cinfo->fint = cinfo->clkin / cinfo->regn;
 
        if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
                return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
 
        memset(&cur, 0, sizeof(cur));
        cur.clkin = dss_sys_clk;
-       cur.use_sys_clk = 1;
-       cur.highfreq = 0;
 
-       /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
-       /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+       /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
        /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
        for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               if (cur.highfreq == 0)
-                       cur.fint = cur.clkin / cur.regn;
-               else
-                       cur.fint = cur.clkin / (2 * cur.regn);
+               cur.fint = cur.clkin / cur.regn;
 
                if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
                        continue;
 
-               /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
                for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
                        unsigned long a, b;
 
                        a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn * (cur.highfreq + 1);
+                       b = cur.regn;
                        cur.clkin4ddr = a / b * 1000;
 
                        if (cur.clkin4ddr > 1800 * 1000 * 1000)
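With the highfreq path removed, the PLL math above reduces to Fint = clkin / regn and CLKIN4DDR = 2 * regm / regn * clkin. As a made-up example with a 19.2 MHz sys_clk: regn = 10 gives Fint = 1.92 MHz, inside the 0.75-2.1 MHz window the comment requires, and regm = 250 gives CLKIN4DDR = 2 * 250 / 10 * 19.2 MHz = 960 MHz, comfortably under the 1800 MHz cap checked a few lines below.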
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        DSSDBGF();
 
-       dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
-       dsi->current_cinfo.highfreq = cinfo->highfreq;
-
+       dsi->current_cinfo.clkin = cinfo->clkin;
        dsi->current_cinfo.fint = cinfo->fint;
        dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
        dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        DSSDBG("DSI Fint %ld\n", cinfo->fint);
 
-       DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
-                       cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
-                       cinfo->clkin,
-                       cinfo->highfreq);
+       DSSDBG("clkin rate %ld\n", cinfo->clkin);
 
        /* DSIPHY == CLKIN4DDR */
-       DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+       DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
                        cinfo->regm,
                        cinfo->regn,
                        cinfo->clkin,
-                       cinfo->highfreq + 1,
                        cinfo->clkin4ddr);
 
        DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
                l = FLD_MOD(l, f, 4, 1);        /* DSI_PLL_FREQSEL */
-       l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
-                       11, 11);                /* DSI_PLL_CLKSEL */
-       l = FLD_MOD(l, cinfo->highfreq,
-                       12, 12);                /* DSI_PLL_HIGHFREQ */
        l = FLD_MOD(l, 1, 13, 13);              /* DSI_PLL_REFEN */
        l = FLD_MOD(l, 0, 14, 14);              /* DSIPHY_CLKINEN */
        l = FLD_MOD(l, 1, 20, 20);              /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dsi_clock_info *cinfo = &dsi->current_cinfo;
        enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
+       int dsi_module = dsi->module_id;
 
        dispc_clk_src = dss_get_dispc_clk_source();
        dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
 
        seq_printf(s,   "- DSI%d PLL -\n", dsi_module + 1);
 
-       seq_printf(s,   "dsi pll source = %s\n",
-                       cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
+       seq_printf(s,   "dsi pll clkin\t%lu\n", cinfo->clkin);
 
        seq_printf(s,   "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
 
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        unsigned long flags;
        struct dsi_irq_stats stats;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
 
        spin_lock_irqsave(&dsi->irq_stats_lock, flags);
 
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 #define PIS(x) \
        seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
 
-       seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
+       seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
        PIS(VC0);
        PIS(VC1);
        PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
 
        dsi_dump_dsidev_irqs(dsidev, s);
 }
-
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops)
-{
-       struct platform_device *dsidev;
-
-       dsidev = dsi_get_dsidev_from_id(0);
-       if (dsidev)
-               debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
-                       &dsi1_dump_irqs, debug_fops);
-
-       dsidev = dsi_get_dsidev_from_id(1);
-       if (dsidev)
-               debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
-                       &dsi2_dump_irqs, debug_fops);
-}
 #endif
 
 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
        dsi_dump_dsidev_regs(dsidev, s);
 }
 
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops)
-{
-       struct platform_device *dsidev;
-
-       dsidev = dsi_get_dsidev_from_id(0);
-       if (dsidev)
-               debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
-                       &dsi1_dump_regs, debug_fops);
-
-       dsidev = dsi_get_dsidev_from_id(1);
-       if (dsidev)
-               debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
-                       &dsi2_dump_regs, debug_fops);
-}
 enum dsi_cio_power_state {
        DSI_COMPLEXIO_POWER_OFF         = 0x0,
        DSI_COMPLEXIO_POWER_ON          = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
                return 1365 * 3;        /* 1365x24 bits */
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
 
        DSSDBGF();
 
-       r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
        if (r)
                return r;
 
@@ -2447,7 +2379,7 @@ err_cio_pwr:
                dsi_cio_disable_lane_override(dsidev);
 err_scp_clk_dom:
        dsi_disable_scp_clk(dsidev);
-       dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
        return r;
 }
 
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
 
        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
        dsi_disable_scp_clk(dsidev);
-       dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
 }
 
 static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
                if (add + size > 4) {
                        DSSERR("Illegal FIFO configuration\n");
                        BUG();
+                       return;
                }
 
                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
                if (add + size > 4) {
                        DSSERR("Illegal FIFO configuration\n");
                        BUG();
+                       return;
                }
 
                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
                return dsi_sync_vc_l4(dsidev, channel);
        default:
                BUG();
+               return -EINVAL;
        }
 }
 
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
                data = reqdata[0] | (reqdata[1] << 8);
        } else {
                BUG();
+               return -EINVAL;
        }
 
        r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
                goto err;
        }
 
-       BUG();
 err:
        DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
        dsi_write_reg(dsidev, DSI_CTRL, r);
 }
 
+/*
+ * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
+ * results in maximum transition time for data and clock lanes to enter and
+ * exit HS mode. Hence, this is the scenario where the least amount of command
+ * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
+ * clock cycles that can be used to interleave command mode data in HS so that
+ * all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
+               int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
+{
+       int transition;
+
+       /*
+        * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
+        * time of data lanes only, if it isn't set, we need to consider HS
+        * transition time of both data and clock lanes. HS transition time
+        * of Scenario 3 is considered.
+        */
+       if (ddr_alwon) {
+               transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
+       } else {
+               int trans1, trans2;
+               trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
+               trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
+                               enter_hs + 1;
+               transition = max(trans1, trans2);
+       }
+
+       return blank > transition ? blank - transition : 0;
+}
+
+/*
+ * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
+ * results in maximum transition time for data lanes to enter and exit LP mode.
+ * Hence, this is the scenario where the least amount of command mode data can
+ * be interleaved. We program the minimum amount of bytes that can be
+ * interleaved in LP so that all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
+               int lp_clk_div, int tdsi_fclk)
+{
+       int trans_lp;   /* time required for a LP transition, in TXBYTECLKHS */
+       int tlp_avail;  /* time left for interleaving commands, in CLKIN4DDR */
+       int ttxclkesc;  /* period of LP transmit escape clock, in CLKIN4DDR */
+       int thsbyte_clk = 16;   /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
+       int lp_inter;   /* cmd mode data that can be interleaved, in bytes */
+
+       /* maximum LP transition time according to Scenario 1 */
+       trans_lp = exit_hs + max(enter_hs, 2) + 1;
+
+       /* CLKIN4DDR = 16 * TXBYTECLKHS */
+       tlp_avail = thsbyte_clk * (blank - trans_lp);
+
+       ttxclkesc = tdsi_fclk * lp_clk_div;
+
+       lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
+                       26) / 16;
+
+       return max(lp_inter, 0);
+}
+
+static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+{
+       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int blanking_mode;
+       int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
+       int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
+       int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
+       int tclk_trail, ths_exit, exiths_clk;
+       bool ddr_alwon;
+       struct omap_video_timings *timings = &dssdev->panel.timings;
+       int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+       int ndl = dsi->num_lanes_used - 1;
+       int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+       int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
+       int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
+       int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
+       int bl_interleave_hs = 0, bl_interleave_lp = 0;
+       u32 r;
+
+       r = dsi_read_reg(dsidev, DSI_CTRL);
+       blanking_mode = FLD_GET(r, 20, 20);
+       hfp_blanking_mode = FLD_GET(r, 21, 21);
+       hbp_blanking_mode = FLD_GET(r, 22, 22);
+       hsa_blanking_mode = FLD_GET(r, 23, 23);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+       hbp = FLD_GET(r, 11, 0);
+       hfp = FLD_GET(r, 23, 12);
+       hsa = FLD_GET(r, 31, 24);
+
+       r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+       ddr_clk_post = FLD_GET(r, 7, 0);
+       ddr_clk_pre = FLD_GET(r, 15, 8);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+       exit_hs_mode_lat = FLD_GET(r, 15, 0);
+       enter_hs_mode_lat = FLD_GET(r, 31, 16);
+
+       r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+       lp_clk_div = FLD_GET(r, 12, 0);
+       ddr_alwon = FLD_GET(r, 13, 13);
+
+       r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+       ths_exit = FLD_GET(r, 7, 0);
+
+       r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+       tclk_trail = FLD_GET(r, 15, 8);
+
+       exiths_clk = ths_exit + tclk_trail;
+
+       width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+       bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
+
+       if (!hsa_blanking_mode) {
+               hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+               hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!hfp_blanking_mode) {
+               hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+               hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!hbp_blanking_mode) {
+               hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+               hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!blanking_mode) {
+               bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+               bl_interleave_lp = dsi_compute_interleave_lp(bllp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+               hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
+               bl_interleave_hs);
+
+       DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+               hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
+               bl_interleave_lp);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+       r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
+       r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
+       r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+       r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
+       r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
+       r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+       r = FLD_MOD(r, bl_interleave_hs, 31, 16);
+       r = FLD_MOD(r, bl_interleave_lp, 15, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+}
+
 static int dsi_proto_config(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
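To make the interleaving arithmetic above concrete, take dsi_compute_interleave_hs() with DDR_CLK_ALWAYS_ON set and made-up latencies enter_hs = 4 and exit_hs = 6: the worst-case transition is 4 + 6 + max(4, 2) + 1 = 15 TXBYTECLKHS cycles, so a blanking interval of 60 cycles leaves 60 - 15 = 45 cycles for interleaved command-mode data, and any interval of 15 cycles or less leaves 0. dsi_compute_interleave_lp() applies the same clamping but converts the remaining time into whole bytes via the escape-clock period before programming the LP fields.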
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
                break;
        default:
                BUG();
+               return -EINVAL;
        }
 
        r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
        if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_config_vp_sync_events(dssdev);
                dsi_config_blanking_modes(dssdev);
+               dsi_config_cmd_mode_interleaving(dssdev);
        }
 
        dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
                        break;
                default:
                        BUG();
+                       return -EINVAL;
                };
 
                dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
        __cancel_delayed_work(&dsi->framedone_timeout_work);
 
        dsi_handle_framedone(dsidev, 0);
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-       dispc_fake_vsync_irq();
-#endif
 }
 
 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
                dispc_mgr_enable_stallmode(dssdev->manager->id, true);
                dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
 
-               dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+               dss_mgr_set_timings(dssdev->manager, &timings);
        } else {
                dispc_mgr_enable_stallmode(dssdev->manager->id, false);
                dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
 
-               dispc_mgr_set_lcd_timings(dssdev->manager->id,
-                       &dssdev->panel.timings);
+               dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
        }
 
                dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
        struct dsi_clock_info cinfo;
        int r;
 
-       /* we always use DSS_CLK_SYSCK as input clock */
-       cinfo.use_sys_clk = true;
        cinfo.regn  = dssdev->clocks.dsi.regn;
        cinfo.regm  = dssdev->clocks.dsi.regm;
        cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
        cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
-       r = dsi_calc_clock_rates(dssdev, &cinfo);
+       r = dsi_calc_clock_rates(dsidev, &cinfo);
        if (r) {
                DSSERR("Failed to calc dsi clocks\n");
                return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       int dsi_module = dsi_get_dsidev_id(dsidev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        int r;
 
        r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
                goto err1;
 
        dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
-       dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
+       dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
        dss_select_lcd_clk_source(dssdev->manager->id,
                        dssdev->clocks.dispc.channel.lcd_clk_src);
 
@@ -4396,7 +4507,7 @@ err3:
        dsi_cio_uninit(dssdev);
 err2:
        dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-       dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
        dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
 
 err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       int dsi_module = dsi_get_dsidev_id(dsidev);
 
        if (enter_ulps && !dsi->ulps_enabled)
                dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
        dsi_vc_enable(dsidev, 3, 0);
 
        dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-       dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
        dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
        dsi_cio_uninit(dssdev);
        dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
 }
 EXPORT_SYMBOL(omapdss_dsi_enable_te);
 
-int dsi_init_display(struct omap_dss_device *dssdev)
+static int __init dsi_init_display(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
                clk_put(dsi->sys_clk);
 }
 
+static void __init dsi_probe_pdata(struct platform_device *dsidev)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
+                       continue;
+
+               if (dssdev->phy.dsi.module != dsi->module_id)
+                       continue;
+
+               r = dsi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &dsidev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
 /* DSI1 HW IP initialisation */
-static int omap_dsihw_probe(struct platform_device *dsidev)
+static int __init omap_dsihw_probe(struct platform_device *dsidev)
 {
-       struct omap_display_platform_data *dss_plat_data;
-       struct omap_dss_board_info *board_info;
        u32 rev;
-       int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
+       int r, i;
        struct resource *dsi_mem;
        struct dsi_data *dsi;
 
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
        if (!dsi)
                return -ENOMEM;
 
+       dsi->module_id = dsidev->id;
        dsi->pdev = dsidev;
-       dsi_pdev_map[dsi_module] = dsidev;
+       dsi_pdev_map[dsi->module_id] = dsidev;
        dev_set_drvdata(&dsidev->dev, dsi);
 
-       dss_plat_data = dsidev->dev.platform_data;
-       board_info = dss_plat_data->board_data;
-       dsi->enable_pads = board_info->dsi_enable_pads;
-       dsi->disable_pads = board_info->dsi_disable_pads;
-
        spin_lock_init(&dsi->irq_lock);
        spin_lock_init(&dsi->errors_lock);
        dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
        else
                dsi->num_lanes_supported = 3;
 
+       dsi_probe_pdata(dsidev);
+
        dsi_runtime_put(dsidev);
 
+       if (dsi->module_id == 0)
+               dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
+       else if (dsi->module_id == 1)
+               dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+       if (dsi->module_id == 0)
+               dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
+       else if (dsi->module_id == 1)
+               dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+#endif
        return 0;
 
 err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
        return r;
 }
 
-static int omap_dsihw_remove(struct platform_device *dsidev)
+static int __exit omap_dsihw_remove(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
        WARN_ON(dsi->scp_clk_refcount > 0);
 
+       omap_dss_unregister_child_devices(&dsidev->dev);
+
        pm_runtime_disable(&dsidev->dev);
 
        dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
 static int dsi_runtime_suspend(struct device *dev)
 {
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
-               goto err_get_dispc;
+               return r;
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
 };
 
 static struct platform_driver omap_dsihw_driver = {
-       .probe          = omap_dsihw_probe,
-       .remove         = omap_dsihw_remove,
+       .remove         = __exit_p(omap_dsihw_remove),
        .driver         = {
                .name   = "omapdss_dsi",
                .owner  = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
        },
 };
 
-int dsi_init_platform_driver(void)
+int __init dsi_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dsihw_driver);
+       return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
 }
 
-void dsi_uninit_platform_driver(void)
+void __exit dsi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dsihw_driver);
+       platform_driver_unregister(&omap_dsihw_driver);
 }
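
Note on the registration change above: dropping .probe from the platform_driver and registering with platform_driver_probe() is the usual kernel idiom when every matching device already exists at init time. The probe path can then live in __init memory and be discarded after boot, while __exit_p() compiles the remove hook out of built-in kernels. Below is a minimal standalone sketch of the same pattern, using a hypothetical "example-hw" device name; it is not part of this commit.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Probe lives in .init.text: platform_driver_probe() only binds devices
 * that are already registered when it runs, never later hotplugged ones. */
static int __init example_hw_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int __exit example_hw_remove(struct platform_device *pdev)
{
	return 0;
}

/* No .probe member; __exit_p() drops the remove hook for built-in code. */
static struct platform_driver example_hw_driver = {
	.remove		= __exit_p(example_hw_remove),
	.driver		= {
		.name	= "example-hw",
		.owner	= THIS_MODULE,
	},
};

static int __init example_hw_init(void)
{
	return platform_driver_probe(&example_hw_driver, example_hw_probe);
}

static void __exit example_hw_exit(void)
{
	platform_driver_unregister(&example_hw_driver);
}

module_init(example_hw_init);
module_exit(example_hw_exit);
MODULE_LICENSE("GPL");

The trade-off is that devices registered after this call are never bound, which fits the pattern in the hunk above, where dsi_probe_pdata() registers the panel devices from board data during the probe itself.
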
index bd2d5e159463c3c49a2f642a52814f8e8d83922f..d2b57197b292086cd95a32ef1b33136c0e216166 100644 (file)
@@ -62,6 +62,9 @@ struct dss_reg {
 #define REG_FLD_MOD(idx, val, start, end) \
        dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
 
+static int dss_runtime_get(void);
+static void dss_runtime_put(void);
+
 static struct {
        struct platform_device *pdev;
        void __iomem    *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
        dss_runtime_put();
 }
 
-void dss_dump_regs(struct seq_file *s)
+static void dss_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
 
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
                break;
        default:
                BUG();
+               return;
        }
 
        dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
                enum omap_dss_clk_source clk_src)
 {
        struct platform_device *dsidev;
-       int b;
+       int b, pos;
 
        switch (clk_src) {
        case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
                break;
        default:
                BUG();
+               return;
        }
 
-       REG_FLD_MOD(DSS_CONTROL, b, 1, 1);      /* DSI_CLK_SWITCH */
+       pos = dsi_module == 0 ? 1 : 10;
+       REG_FLD_MOD(DSS_CONTROL, b, pos, pos);  /* DSIx_CLK_SWITCH */
 
        dss.dsi_clk_source[dsi_module] = clk_src;
 }
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
                break;
        default:
                BUG();
+               return;
        }
 
        pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
        clk_put(dss.dss_clk);
 }
 
-int dss_runtime_get(void)
+static int dss_runtime_get(void)
 {
        int r;
 
@@ -717,14 +724,14 @@ int dss_runtime_get(void)
        return r < 0 ? r : 0;
 }
 
-void dss_runtime_put(void)
+static void dss_runtime_put(void)
 {
        int r;
 
        DSSDBG("dss_runtime_put\n");
 
        r = pm_runtime_put_sync(&dss.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
 }
 
 /* DEBUGFS */
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
 #endif
 
 /* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
+static int __init omap_dsshw_probe(struct platform_device *pdev)
 {
        struct resource *dss_mem;
        u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
        dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
        dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
 
-       r = dpi_init();
-       if (r) {
-               DSSERR("Failed to initialize DPI\n");
-               goto err_dpi;
-       }
-
-       r = sdi_init();
-       if (r) {
-               DSSERR("Failed to initialize SDI\n");
-               goto err_sdi;
-       }
-
        rev = dss_read_reg(DSS_REVISION);
        printk(KERN_INFO "OMAP DSS rev %d.%d\n",
                        FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
 
        dss_runtime_put();
 
+       dss_debugfs_create_file("dss", dss_dump_regs);
+
        return 0;
-err_sdi:
-       dpi_exit();
-err_dpi:
-       dss_runtime_put();
+
 err_runtime_get:
        pm_runtime_disable(&pdev->dev);
        dss_put_clocks();
        return r;
 }
 
-static int omap_dsshw_remove(struct platform_device *pdev)
+static int __exit omap_dsshw_remove(struct platform_device *pdev)
 {
-       dpi_exit();
-       sdi_exit();
-
        pm_runtime_disable(&pdev->dev);
 
        dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
 static int dss_runtime_suspend(struct device *dev)
 {
        dss_save_context();
+       dss_set_min_bus_tput(dev, 0);
        return 0;
 }
 
 static int dss_runtime_resume(struct device *dev)
 {
+       int r;
+       /*
+        * Set an arbitrarily high tput request to ensure OPP100.
+        * What we should really do is to make a request to stay in OPP100,
+        * without any tput requirements, but that is not currently possible
+        * via the PM layer.
+        */
+
+       r = dss_set_min_bus_tput(dev, 1000000000);
+       if (r)
+               return r;
+
        dss_restore_context();
        return 0;
 }
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
 };
 
 static struct platform_driver omap_dsshw_driver = {
-       .probe          = omap_dsshw_probe,
-       .remove         = omap_dsshw_remove,
+       .remove         = __exit_p(omap_dsshw_remove),
        .driver         = {
                .name   = "omapdss_dss",
                .owner  = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
        },
 };
 
-int dss_init_platform_driver(void)
+int __init dss_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dsshw_driver);
+       return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
 }
 
 void dss_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dsshw_driver);
+       platform_driver_unregister(&omap_dsshw_driver);
 }
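
The DSS_CONTROL update above is the main functional change in dss_select_dsi_clk_source(): the clock-switch bit position is now chosen per DSI module (bit 1 or bit 10, per the pos assignment in the hunk). For readers unfamiliar with the FLD_MOD()/REG_FLD_MOD() helpers these files lean on, here is a self-contained re-implementation for illustration only; it mirrors the usual OMAP DSS macro definitions but is not copied from this commit.

#include <stdio.h>

/* Splice 'val' into bits [end..start] of 'orig', leaving the rest intact. */
#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) \
	(((unsigned)(val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	unsigned dss_control = 0;	/* stand-in for the DSS_CONTROL register */
	int dsi_module = 1;		/* second DSI module */
	int b = 1;			/* value picked by the clk_src switch above */

	/* bit 1 is DSI1_CLK_SWITCH, bit 10 is DSI2_CLK_SWITCH */
	int pos = dsi_module == 0 ? 1 : 10;

	dss_control = FLD_MOD(dss_control, b, pos, pos);
	printf("DSS_CONTROL = 0x%08x\n", dss_control);	/* prints 0x00000400 */
	return 0;
}
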
index d4b3dff2ead338db918ce4801ac7380966f6b679..dd1092ceaeef91d0390c23d152b4c65f680cbc09 100644 (file)
@@ -150,9 +150,6 @@ struct dsi_clock_info {
        u16 regm_dsi;   /* OMAP3: REGM4
                         * OMAP4: REGM5 */
        u16 lp_clk_div;
-
-       u8 highfreq;
-       bool use_sys_clk;
 };
 
 struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
 struct bus_type *dss_get_bus(void);
 struct regulator *dss_get_vdds_dsi(void);
 struct regulator *dss_get_vdds_sdi(void);
+int dss_get_ctx_loss_count(struct device *dev);
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+               struct device *parent, int disp_num);
+void omap_dss_unregister_device(struct omap_dss_device *dssdev);
+void omap_dss_unregister_child_devices(struct device *parent);
 
 /* apply */
 void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
 int dss_mgr_set_device(struct omap_overlay_manager *mgr,
                struct omap_dss_device *dssdev);
 int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings);
+const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
 
 bool dss_ovl_is_enabled(struct omap_overlay *ovl);
 int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
 void dss_uninit_overlay_managers(struct platform_device *pdev);
 int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
                const struct omap_overlay_manager_info *info);
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings);
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev,
                struct omap_overlay_manager_info *info,
+               const struct omap_video_timings *mgr_timings,
                struct omap_overlay_info **overlay_infos);
 
 /* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
 void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
 int dss_ovl_simple_check(struct omap_overlay *ovl,
                const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl,
-               struct omap_overlay_info *info, struct omap_dss_device *dssdev);
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+               const struct omap_video_timings *mgr_timings);
 
 /* DSS */
-int dss_init_platform_driver(void);
+int dss_init_platform_driver(void) __init;
 void dss_uninit_platform_driver(void);
 
-int dss_runtime_get(void);
-void dss_runtime_put(void);
-
 void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
 enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
 const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
 void dss_dump_clocks(struct seq_file *s);
 
-void dss_dump_regs(struct seq_file *s);
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 void dss_debug_dump_clocks(struct seq_file *s);
 #endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
                struct dispc_clock_info *dispc_cinfo);
 
 /* SDI */
-#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(void);
-void sdi_exit(void);
-int sdi_init_display(struct omap_dss_device *display);
-#else
-static inline int sdi_init(void)
-{
-       return 0;
-}
-static inline void sdi_exit(void)
-{
-}
-#endif
+int sdi_init_platform_driver(void) __init;
+void sdi_uninit_platform_driver(void) __exit;
 
 /* DSI */
 #ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
 struct dentry;
 struct file_operations;
 
-int dsi_init_platform_driver(void);
-void dsi_uninit_platform_driver(void);
+int dsi_init_platform_driver(void) __init;
+void dsi_uninit_platform_driver(void) __exit;
 
 int dsi_runtime_get(struct platform_device *dsidev);
 void dsi_runtime_put(struct platform_device *dsidev);
 
 void dsi_dump_clocks(struct seq_file *s);
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops);
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops);
 
-int dsi_init_display(struct omap_dss_device *display);
 void dsi_irq_handler(void);
 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
 
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
 struct platform_device *dsi_get_dsidev_from_id(int module);
 #else
-static inline int dsi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void dsi_uninit_platform_driver(void)
-{
-}
 static inline int dsi_runtime_get(struct platform_device *dsidev)
 {
        return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
 #endif
 
 /* DPI */
-#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(void);
-void dpi_exit(void);
-int dpi_init_display(struct omap_dss_device *dssdev);
-#else
-static inline int dpi_init(void)
-{
-       return 0;
-}
-static inline void dpi_exit(void)
-{
-}
-#endif
+int dpi_init_platform_driver(void) __init;
+void dpi_uninit_platform_driver(void) __exit;
 
 /* DISPC */
-int dispc_init_platform_driver(void);
-void dispc_uninit_platform_driver(void);
+int dispc_init_platform_driver(void) __init;
+void dispc_uninit_platform_driver(void) __exit;
 void dispc_dump_clocks(struct seq_file *s);
-void dispc_dump_irqs(struct seq_file *s);
-void dispc_dump_regs(struct seq_file *s);
 void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
 
 int dispc_runtime_get(void);
 void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
 void dispc_lcd_enable_signal_polarity(bool act_high);
 void dispc_lcd_enable_signal(bool enable);
 void dispc_pck_free_enable(bool enable);
-void dispc_set_digit_size(u16 width, u16 height);
 void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+               const struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
 void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
                struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
 
 void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge);
+               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+               bool manual_update);
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-               bool ilace, bool replication);
+               bool ilace, bool replication,
+               const struct omap_video_timings *mgr_timings);
 int dispc_ovl_enable(enum omap_plane plane, bool enable);
 void dispc_ovl_set_channel_out(enum omap_plane plane,
                enum omap_channel channel);
 
 void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
 u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
 bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
 void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
 void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
                enum omap_lcd_display_type type);
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
                struct omap_video_timings *timings);
 void dispc_mgr_set_pol_freq(enum omap_channel channel,
                enum omap_panel_config config, u8 acbi, u8 acb);
 unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
 unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+unsigned long dispc_core_clk_rate(void);
 int dispc_mgr_set_clock_div(enum omap_channel channel,
                struct dispc_clock_info *cinfo);
 int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
 
 /* VENC */
 #ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init_platform_driver(void);
-void venc_uninit_platform_driver(void);
-void venc_dump_regs(struct seq_file *s);
-int venc_init_display(struct omap_dss_device *display);
+int venc_init_platform_driver(void) __init;
+void venc_uninit_platform_driver(void) __exit;
 unsigned long venc_get_pixel_clock(void);
 #else
-static inline int venc_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void venc_uninit_platform_driver(void)
-{
-}
 static inline unsigned long venc_get_pixel_clock(void)
 {
        WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
 
 /* HDMI */
 #ifdef CONFIG_OMAP4_DSS_HDMI
-int hdmi_init_platform_driver(void);
-void hdmi_uninit_platform_driver(void);
-int hdmi_init_display(struct omap_dss_device *dssdev);
+int hdmi_init_platform_driver(void) __init;
+void hdmi_uninit_platform_driver(void) __exit;
 unsigned long hdmi_get_pixel_clock(void);
-void hdmi_dump_regs(struct seq_file *s);
 #else
-static inline int hdmi_init_display(struct omap_dss_device *dssdev)
-{
-       return 0;
-}
-static inline int hdmi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void hdmi_uninit_platform_driver(void)
-{
-}
 static inline unsigned long hdmi_get_pixel_clock(void)
 {
        WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
 bool omapdss_hdmi_detect(void);
 int hdmi_panel_init(void);
 void hdmi_panel_exit(void);
+#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
+int hdmi_audio_enable(void);
+void hdmi_audio_disable(void);
+int hdmi_audio_start(void);
+void hdmi_audio_stop(void);
+bool hdmi_mode_has_audio(void);
+int hdmi_audio_config(struct omap_dss_audio *audio);
+#endif
 
 /* RFBI */
-#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init_platform_driver(void);
-void rfbi_uninit_platform_driver(void);
-void rfbi_dump_regs(struct seq_file *s);
-int rfbi_init_display(struct omap_dss_device *display);
-#else
-static inline int rfbi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void rfbi_uninit_platform_driver(void)
-{
-}
-#endif
+int rfbi_init_platform_driver(void) __init;
+void rfbi_uninit_platform_driver(void) __exit;
 
 
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
index ce14aa6dd672802dd61659cdfb13201c8fabba0c..938709724f0cda2f6201c52b9548410e449eab49 100644 (file)
@@ -52,6 +52,8 @@ struct omap_dss_features {
        const char * const *clksrc_names;
        const struct dss_param_range *dss_params;
 
+       const enum omap_dss_rotation_type supported_rotation_types;
+
        const u32 buffer_size_unit;
        const u32 burst_size_unit;
 };
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
         * scaler cannot scale an image with width more than 768.
         */
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 768 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 1, (1 << 13) - 1},
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 1024 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 0, (1 << 13) - 1 },
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 /* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
        .overlay_caps = omap2_dss_overlay_caps,
        .clksrc_names = omap2_dss_clk_source_names,
        .dss_params = omap2_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
        .overlay_caps = omap3430_dss_overlay_caps,
        .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
        .overlay_caps = omap3630_dss_overlay_caps,
        .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features  = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
        .pll_enable             =       ti_hdmi_4xxx_pll_enable,
        .pll_disable            =       ti_hdmi_4xxx_pll_disable,
        .video_enable           =       ti_hdmi_4xxx_wp_video_start,
+       .video_disable          =       ti_hdmi_4xxx_wp_video_stop,
        .dump_wrapper           =       ti_hdmi_4xxx_wp_dump,
        .dump_core              =       ti_hdmi_4xxx_core_dump,
        .dump_pll               =       ti_hdmi_4xxx_pll_dump,
        .dump_phy               =       ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
        .audio_enable           =       ti_hdmi_4xxx_wp_audio_enable,
+       .audio_disable          =       ti_hdmi_4xxx_wp_audio_disable,
+       .audio_start            =       ti_hdmi_4xxx_audio_start,
+       .audio_stop             =       ti_hdmi_4xxx_audio_stop,
+       .audio_config           =       ti_hdmi_4xxx_audio_config,
 #endif
 
 };
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
        *end = omap_current_dss_features->reg_fields[id].end;
 }
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
+{
+       return omap_current_dss_features->supported_rotation_types & rot_type;
+}
+
 void dss_features_init(void)
 {
        if (cpu_is_omap24xx())
index c332e7ddfce14dfedfecad805f1eae2c5478d27e..bdf469f080e75e3742cb350541ec08362fb2f683 100644 (file)
@@ -62,6 +62,7 @@ enum dss_feat_id {
        FEAT_FIFO_MERGE,
        /* An unknown HW bug causing the normal FIFO thresholds not to work */
        FEAT_OMAP3_DSI_FIFO_BUG,
+       FEAT_BURST_2D,
 };
 
 /* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
        FEAT_PARAM_DSIPLL_LPDIV,
        FEAT_PARAM_DOWNSCALE,
        FEAT_PARAM_LINEWIDTH,
+       FEAT_PARAM_MGR_WIDTH,
+       FEAT_PARAM_MGR_HEIGHT,
 };
 
 /* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
 u32 dss_feat_get_buffer_size_unit(void);       /* in bytes */
 u32 dss_feat_get_burst_size_unit(void);                /* in bytes */
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
+
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
 void dss_features_init(void);
index c4b4f6950a9269a925b8617865f4a4a87fb89d06..26a2430a70288d6cf468357d854fe3429b245f72 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "ti_hdmi_4xxx_ip.h"
-#endif
 
 #include "ti_hdmi.h"
 #include "dss.h"
@@ -63,7 +57,6 @@
 
 static struct {
        struct mutex lock;
-       struct omap_display_platform_data *pdata;
        struct platform_device *pdev;
        struct hdmi_ip_data ip_data;
 
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
 
        DSSDBG("hdmi_runtime_get\n");
 
-       /*
-        * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
-        * This should be removed later.
-        */
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = pm_runtime_get_sync(&hdmi.pdev->dev);
        WARN_ON(r < 0);
        if (r < 0)
-               goto err_get_hdmi;
+               return r;
 
        return 0;
-
-err_get_hdmi:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static void hdmi_runtime_put(void)
@@ -158,16 +138,10 @@ static void hdmi_runtime_put(void)
        DSSDBG("hdmi_runtime_put\n");
 
        r = pm_runtime_put_sync(&hdmi.pdev->dev);
-       WARN_ON(r < 0);
-
-       /*
-        * HACK: This is added to complement the dss_runtime_get() call in
-        * hdmi_runtime_get(). This should be removed later.
-        */
-       dss_runtime_put();
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
-int hdmi_init_display(struct omap_dss_device *dssdev)
+static int __init hdmi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
 
        hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
 
        /* config the PLL and PHY hdmi_set_pll_pwrfirst */
        r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
        dispc_enable_gamma_table(0);
 
        /* tv size */
-       dispc_set_digit_size(dssdev->panel.timings.x_res,
-                       dssdev->panel.timings.y_res);
+       dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
+       r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
+       if (r)
+               goto err_vid_enable;
 
        r = dss_mgr_enable(dssdev->manager);
        if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
        return 0;
 
 err_mgr_enable:
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
+err_vid_enable:
        hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
 err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
 {
        dss_mgr_disable(dssdev->manager);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
        hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
                r = hdmi_power_on(dssdev);
                if (r)
                        DSSERR("failed to power on device\n");
+       } else {
+               dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
        }
 }
 
-void hdmi_dump_regs(struct seq_file *s)
+static void hdmi_dump_regs(struct seq_file *s)
 {
        mutex_lock(&hdmi.lock);
 
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
        mutex_unlock(&hdmi.lock);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
-                               struct snd_soc_dai *dai)
+static int hdmi_get_clocks(struct platform_device *pdev)
 {
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_codec *codec = rtd->codec;
-       struct platform_device *pdev = to_platform_device(codec->dev);
-       struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-       int err = 0;
+       struct clk *clk;
 
-       if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) {
-               dev_err(&pdev->dev, "Cannot enable/disable audio\n");
-               return -ENODEV;
+       clk = clk_get(&pdev->dev, "sys_clk");
+       if (IS_ERR(clk)) {
+               DSSERR("can't get sys_clk\n");
+               return PTR_ERR(clk);
        }
 
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               ip_data->ops->audio_enable(ip_data, true);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ip_data->ops->audio_enable(ip_data, false);
-               break;
-       default:
-               err = -EINVAL;
-       }
-       return err;
-}
-
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params,
-                                   struct snd_soc_dai *dai)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_codec *codec = rtd->codec;
-       struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-       struct hdmi_audio_format audio_format;
-       struct hdmi_audio_dma audio_dma;
-       struct hdmi_core_audio_config core_cfg;
-       struct hdmi_core_infoframe_audio aud_if_cfg;
-       int err, n, cts;
-       enum hdmi_core_audio_sample_freq sample_freq;
-
-       switch (params_format(params)) {
-       case SNDRV_PCM_FORMAT_S16_LE:
-               core_cfg.i2s_cfg.word_max_length =
-                       HDMI_AUDIO_I2S_MAX_WORD_20BITS;
-               core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
-               core_cfg.i2s_cfg.in_length_bits =
-                       HDMI_AUDIO_I2S_INPUT_LENGTH_16;
-               core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
-               audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
-               audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-               audio_dma.transfer_size = 0x10;
-               break;
-       case SNDRV_PCM_FORMAT_S24_LE:
-               core_cfg.i2s_cfg.word_max_length =
-                       HDMI_AUDIO_I2S_MAX_WORD_24BITS;
-               core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
-               core_cfg.i2s_cfg.in_length_bits =
-                       HDMI_AUDIO_I2S_INPUT_LENGTH_24;
-               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
-               audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
-               audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-               core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-               audio_dma.transfer_size = 0x20;
-               break;
-       default:
+       hdmi.sys_clk = clk;
+
+       return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+       if (hdmi.sys_clk)
+               clk_put(hdmi.sys_clk);
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
+{
+       u32 deep_color;
+       bool deep_color_correct = false;
+       u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
+
+       if (n == NULL || cts == NULL)
                return -EINVAL;
-       }
 
-       switch (params_rate(params)) {
+       /* TODO: When implemented, query deep color mode here. */
+       deep_color = 100;
+
+       /*
+        * When using deep color, the default N value (as in the HDMI
+        * specification) yields a non-integer CTS. Hence, we
+        * modify it while keeping the restrictions described in
+        * section 7.2.1 of the HDMI 1.4a specification.
+        */
+       switch (sample_freq) {
        case 32000:
-               sample_freq = HDMI_AUDIO_FS_32000;
+       case 48000:
+       case 96000:
+       case 192000:
+               if (deep_color == 125)
+                       if (pclk == 27027 || pclk == 74250)
+                               deep_color_correct = true;
+               if (deep_color == 150)
+                       if (pclk == 27027)
+                               deep_color_correct = true;
                break;
        case 44100:
-               sample_freq = HDMI_AUDIO_FS_44100;
-               break;
-       case 48000:
-               sample_freq = HDMI_AUDIO_FS_48000;
+       case 88200:
+       case 176400:
+               if (deep_color == 125)
+                       if (pclk == 27027)
+                               deep_color_correct = true;
                break;
        default:
                return -EINVAL;
        }
 
-       err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
-       if (err < 0)
-               return err;
-
-       /* Audio wrapper config */
-       audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
-       audio_format.active_chnnls_msk = 0x03;
-       audio_format.type = HDMI_AUDIO_TYPE_LPCM;
-       audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
-       /* Disable start/stop signals of IEC 60958 blocks */
-       audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
+       if (deep_color_correct) {
+               switch (sample_freq) {
+               case 32000:
+                       *n = 8192;
+                       break;
+               case 44100:
+                       *n = 12544;
+                       break;
+               case 48000:
+                       *n = 8192;
+                       break;
+               case 88200:
+                       *n = 25088;
+                       break;
+               case 96000:
+                       *n = 16384;
+                       break;
+               case 176400:
+                       *n = 50176;
+                       break;
+               case 192000:
+                       *n = 32768;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               switch (sample_freq) {
+               case 32000:
+                       *n = 4096;
+                       break;
+               case 44100:
+                       *n = 6272;
+                       break;
+               case 48000:
+                       *n = 6144;
+                       break;
+               case 88200:
+                       *n = 12544;
+                       break;
+               case 96000:
+                       *n = 12288;
+                       break;
+               case 176400:
+                       *n = 25088;
+                       break;
+               case 192000:
+                       *n = 24576;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+       *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
 
-       audio_dma.block_size = 0xC0;
-       audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
-       audio_dma.fifo_threshold = 0x20; /* in number of samples */
+       return 0;
+}
 
-       hdmi_wp_audio_config_dma(ip_data, &audio_dma);
-       hdmi_wp_audio_config_format(ip_data, &audio_format);
+int hdmi_audio_enable(void)
+{
+       DSSDBG("audio_enable\n");
 
-       /*
-        * I2S config
-        */
-       core_cfg.i2s_cfg.en_high_bitrate_aud = false;
-       /* Only used with high bitrate audio */
-       core_cfg.i2s_cfg.cbit_order = false;
-       /* Serial data and word select should change on sck rising edge */
-       core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
-       core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
-       /* Set I2S word select polarity */
-       core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
-       core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
-       /* Set serial data to word select shift. See Phillips spec. */
-       core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
-       /* Enable one of the four available serial data channels */
-       core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
-       /* Core audio config */
-       core_cfg.freq_sample = sample_freq;
-       core_cfg.n = n;
-       core_cfg.cts = cts;
-       if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
-               core_cfg.aud_par_busclk = 0;
-               core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
-               core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
-       } else {
-               core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
-               core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
-               core_cfg.use_mclk = true;
-       }
+       return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
+}
 
-       if (core_cfg.use_mclk)
-               core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
-       core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
-       core_cfg.en_spdif = false;
-       /* Use sample frequency from channel status word */
-       core_cfg.fs_override = true;
-       /* Enable ACR packets */
-       core_cfg.en_acr_pkt = true;
-       /* Disable direct streaming digital audio */
-       core_cfg.en_dsd_audio = false;
-       /* Use parallel audio interface */
-       core_cfg.en_parallel_aud_input = true;
-
-       hdmi_core_audio_config(ip_data, &core_cfg);
+void hdmi_audio_disable(void)
+{
+       DSSDBG("audio_disable\n");
 
-       /*
-        * Configure packet
-        * info frame audio see doc CEA861-D page 74
-        */
-       aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
-       aud_if_cfg.db1_channel_count = 2;
-       aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
-       aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
-       aud_if_cfg.db4_channel_alloc = 0x00;
-       aud_if_cfg.db5_downmix_inh = false;
-       aud_if_cfg.db5_lsv = 0;
-
-       hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
-       return 0;
+       hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
 }
 
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
-                                 struct snd_soc_dai *dai)
+int hdmi_audio_start(void)
 {
-       if (!hdmi.ip_data.cfg.cm.mode) {
-               pr_err("Current video settings do not support audio.\n");
-               return -EIO;
-       }
-       return 0;
+       DSSDBG("audio_start\n");
+
+       return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
 }
 
-static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+void hdmi_audio_stop(void)
 {
-       struct hdmi_ip_data *priv = &hdmi.ip_data;
+       DSSDBG("audio_stop\n");
 
-       snd_soc_codec_set_drvdata(codec, priv);
-       return 0;
+       hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
 }
 
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
-       .probe = hdmi_audio_codec_probe,
-};
+bool hdmi_mode_has_audio(void)
+{
+       if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
+               return true;
+       else
+               return false;
+}
 
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
-       .hw_params = hdmi_audio_hw_params,
-       .trigger = hdmi_audio_trigger,
-       .startup = hdmi_audio_startup,
-};
+int hdmi_audio_config(struct omap_dss_audio *audio)
+{
+       return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
+}
 
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
-               .name = "hdmi-audio-codec",
-               .playback = {
-                       .channels_min = 2,
-                       .channels_max = 2,
-                       .rates = SNDRV_PCM_RATE_32000 |
-                               SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
-                       .formats = SNDRV_PCM_FMTBIT_S16_LE |
-                               SNDRV_PCM_FMTBIT_S24_LE,
-               },
-               .ops = &hdmi_audio_codec_ops,
-};
 #endif
 
-static int hdmi_get_clocks(struct platform_device *pdev)
+static void __init hdmi_probe_pdata(struct platform_device *pdev)
 {
-       struct clk *clk;
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int r, i;
 
-       clk = clk_get(&pdev->dev, "sys_clk");
-       if (IS_ERR(clk)) {
-               DSSERR("can't get sys_clk\n");
-               return PTR_ERR(clk);
-       }
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
 
-       hdmi.sys_clk = clk;
+               if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
+                       continue;
 
-       return 0;
-}
+               r = hdmi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
 
-static void hdmi_put_clocks(void)
-{
-       if (hdmi.sys_clk)
-               clk_put(hdmi.sys_clk);
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
 }
 
 /* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
+static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
 {
        struct resource *hdmi_mem;
        int r;
 
-       hdmi.pdata = pdev->dev.platform_data;
        hdmi.pdev = pdev;
 
        mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
 
        hdmi_panel_init();
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+       dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+       hdmi_probe_pdata(pdev);
 
-       /* Register ASoC codec DAI */
-       r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
-                                       &hdmi_codec_dai_drv, 1);
-       if (r) {
-               DSSERR("can't register ASoC HDMI audio codec\n");
-               return r;
-       }
-#endif
        return 0;
 }
 
-static int omapdss_hdmihw_remove(struct platform_device *pdev)
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
 {
-       hdmi_panel_exit();
+       omap_dss_unregister_child_devices(&pdev->dev);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-       snd_soc_unregister_codec(&pdev->dev);
-#endif
+       hdmi_panel_exit();
 
        pm_runtime_disable(&pdev->dev);
 
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
        clk_disable(hdmi.sys_clk);
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
-
+               return r;
 
        clk_enable(hdmi.sys_clk);
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
 };
 
 static struct platform_driver omapdss_hdmihw_driver = {
-       .probe          = omapdss_hdmihw_probe,
-       .remove         = omapdss_hdmihw_remove,
+       .remove         = __exit_p(omapdss_hdmihw_remove),
        .driver         = {
                .name   = "omapdss_hdmi",
                .owner  = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
        },
 };
 
-int hdmi_init_platform_driver(void)
+int __init hdmi_init_platform_driver(void)
 {
-       return platform_driver_register(&omapdss_hdmihw_driver);
+       return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
 }
 
-void hdmi_uninit_platform_driver(void)
+void __exit hdmi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omapdss_hdmihw_driver);
+       platform_driver_unregister(&omapdss_hdmihw_driver);
 }
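
As a quick sanity check on hdmi_compute_acr() above: with no deep color (deep_color == 100) the added code reduces to the standard HDMI relation CTS = f_pixel * N / (128 * f_s), written with the pixel clock in kHz. The standalone sketch below redoes that arithmetic for an assumed 1080p60 pixel clock of 148.5 MHz and 48 kHz audio; the values are chosen for illustration and are not taken from this commit.

#include <stdio.h>

/* Mirrors the non-deep-color branch of the CTS computation above:
 * CTS = pclk_khz * (N / 128) * deep_color / (sample_freq / 10). */
static unsigned compute_cts(unsigned pclk_khz, unsigned n, unsigned fs)
{
	unsigned deep_color = 100;	/* 8 bits per component, no deep color */

	return pclk_khz * (n / 128) * deep_color / (fs / 10);
}

int main(void)
{
	/* N = 6144 for 48 kHz, as in the table above; 148500 kHz = 148.5 MHz */
	printf("CTS = %u\n", compute_cts(148500, 6144, 48000));	/* 148500 */
	return 0;
}

The result, 148500, equals the pixel clock in kHz, which is the expected ACR table value for this clock and sample rate.
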
index 533d5dc634d256374a22b9a2da8a8eb0453f9a34..1179e3c4b1c76565336b8e4a6041c5bc49da0964 100644 (file)
 #include "dss.h"
 
 static struct {
-       struct mutex hdmi_lock;
+       /* This protects the panel ops, mainly when accessing the HDMI IP. */
+       struct mutex lock;
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       /* This protects the audio ops, specifically. */
+       spinlock_t audio_lock;
+#endif
 } hdmi;
 
 
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
 
 }
 
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+       int r;
+
+       mutex_lock(&hdmi.lock);
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       /* enable audio only if the display is active and supports audio */
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+           !hdmi_mode_has_audio()) {
+               DSSERR("audio not supported or display is off\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_enable();
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       hdmi_audio_disable();
+
+       dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+       /*
+        * No need to check the panel state. It was checked when transitioning
+        * to AUDIO_ENABLED.
+        */
+       if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
+               DSSERR("audio start from invalid state\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_start();
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       return r;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       hdmi_audio_stop();
+       dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+       bool r = false;
+
+       mutex_lock(&hdmi.lock);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+               goto err;
+
+       if (!hdmi_mode_has_audio())
+               goto err;
+
+       r = true;
+err:
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio)
+{
+       unsigned long flags;
+       int r;
+
+       mutex_lock(&hdmi.lock);
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       /* config audio only if the display is active and supports audio */
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+           !hdmi_mode_has_audio()) {
+               DSSERR("audio not supported or display is off\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_config(audio);
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+#else
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+       return -EPERM;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+       return -EPERM;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+       return false;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio)
+{
+       return -EPERM;
+}
+#endif
+
 static int hdmi_panel_enable(struct omap_dss_device *dssdev)
 {
        int r = 0;
        DSSDBG("ENTER hdmi_panel_enable\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
                r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
 
 static void hdmi_panel_disable(struct omap_dss_device *dssdev)
 {
-       mutex_lock(&hdmi.hdmi_lock);
-
-       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+       mutex_lock(&hdmi.lock);
+
+       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+               /*
+                * TODO: notify audio users that the display was disabled. For
+                * now, disable audio locally to not break our audio state
+                * machine.
+                */
+               hdmi_panel_audio_disable(dssdev);
                omapdss_hdmi_display_disable(dssdev);
+       }
 
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
 {
        int r = 0;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = -EINVAL;
                goto err;
        }
 
-       dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+       /*
+        * TODO: notify audio users that the display was suspended. For now,
+        * disable audio locally to not break our audio state machine.
+        */
+       hdmi_panel_audio_disable(dssdev);
 
+       dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
        omapdss_hdmi_display_disable(dssdev);
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
 {
        int r = 0;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
                r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
                DSSERR("failed to power on\n");
                goto err;
        }
+       /* TODO: notify audio users that the panel resumed. */
 
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -141,11 +315,11 @@ err:
 static void hdmi_get_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        *timings = dssdev->panel.timings;
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
 {
        DSSDBG("hdmi_set_timings\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
+
+       /*
+        * TODO: notify audio users that there was a timings change. For
+        * now, disable audio locally to not break our audio state machine.
+        */
+       hdmi_panel_audio_disable(dssdev);
 
        dssdev->panel.timings = *timings;
        omapdss_hdmi_display_set_timing(dssdev);
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
 
        DSSDBG("hdmi_check_timings\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        r = omapdss_hdmi_display_check_timing(dssdev, timings);
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
        return r;
 }
 
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
 {
        int r;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
                        dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
                omapdss_hdmi_display_disable(dssdev);
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
 {
        int r;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
                        dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
                omapdss_hdmi_display_disable(dssdev);
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
        .check_timings  = hdmi_check_timings,
        .read_edid      = hdmi_read_edid,
        .detect         = hdmi_detect,
+       .audio_enable   = hdmi_panel_audio_enable,
+       .audio_disable  = hdmi_panel_audio_disable,
+       .audio_start    = hdmi_panel_audio_start,
+       .audio_stop     = hdmi_panel_audio_stop,
+       .audio_supported        = hdmi_panel_audio_supported,
+       .audio_config   = hdmi_panel_audio_config,
        .driver                 = {
                .name   = "hdmi_panel",
                .owner  = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
 
 int hdmi_panel_init(void)
 {
-       mutex_init(&hdmi.hdmi_lock);
+       mutex_init(&hdmi.lock);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       spin_lock_init(&hdmi.audio_lock);
+#endif
 
        omap_dss_register_driver(&hdmi_driver);
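The panel callbacks added above implement a small audio state machine guarded by hdmi.audio_lock: hdmi_panel_audio_config() moves the device to OMAP_DSS_AUDIO_CONFIGURED while the display is active and the mode carries audio, hdmi_panel_audio_start() only permits the OMAP_DSS_AUDIO_ENABLED to OMAP_DSS_AUDIO_PLAYING transition, hdmi_panel_audio_stop() drops back to ENABLED, and hdmi_panel_audio_disable() forces DISABLED (it is also called on display disable, suspend and timing changes so the state machine is never left dangling). A minimal userspace sketch of just the start/stop rule, with simplified names that are illustrative only and not part of omapdss:

#include <stdio.h>
#include <errno.h>

enum audio_state { AUDIO_DISABLED, AUDIO_CONFIGURED, AUDIO_ENABLED, AUDIO_PLAYING };

/* start is only legal from ENABLED, mirroring hdmi_panel_audio_start() */
static int audio_start(enum audio_state *s)
{
	if (*s != AUDIO_ENABLED)
		return -EPERM;
	*s = AUDIO_PLAYING;
	return 0;
}

/* stop always falls back to ENABLED, mirroring hdmi_panel_audio_stop() */
static void audio_stop(enum audio_state *s)
{
	*s = AUDIO_ENABLED;
}

int main(void)
{
	enum audio_state s = AUDIO_DISABLED;

	printf("start from DISABLED: %d\n", audio_start(&s));	/* -EPERM: rejected */
	s = AUDIO_ENABLED;
	printf("start from ENABLED:  %d\n", audio_start(&s));	/* 0: now PLAYING */
	audio_stop(&s);						/* back to ENABLED */
	return 0;
}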
 
index e7364603f6a1d09fa193e63ed007565388090e1b..0cbcde4c688a9e40925daf5c12f960f6bb75473b 100644
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
        return 0;
 }
 
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings)
+{
+       if (!dispc_mgr_timings_ok(mgr->id, timings)) {
+               DSSERR("check_manager: invalid timings\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev,
                struct omap_overlay_manager_info *info,
+               const struct omap_video_timings *mgr_timings,
                struct omap_overlay_info **overlay_infos)
 {
        struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
                        return r;
        }
 
+       r = dss_mgr_check_timings(mgr, mgr_timings);
+       if (r)
+               return r;
+
        list_for_each_entry(ovl, &mgr->overlays, list) {
                struct omap_overlay_info *oi;
                int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
                if (oi == NULL)
                        continue;
 
-               r = dss_ovl_check(ovl, oi, dssdev);
+               r = dss_ovl_check(ovl, oi, mgr_timings);
                if (r)
                        return r;
        }
index 6e821810deec092324a0f1013fbb3cb32bba72dd..b0ba60f88dd23d3fdf5d5583850ecc7282b4a09d 100644
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
                return -EINVAL;
        }
 
+       if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
+               DSSERR("check_overlay: rotation type %d not supported\n",
+                               info->rotation_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
-int dss_ovl_check(struct omap_overlay *ovl,
-               struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+               const struct omap_video_timings *mgr_timings)
 {
        u16 outw, outh;
        u16 dw, dh;
 
-       if (dssdev == NULL)
-               return 0;
-
-       dssdev->driver->get_resolution(dssdev, &dw, &dh);
+       dw = mgr_timings->x_res;
+       dh = mgr_timings->y_res;
 
        if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
                outw = info->width;
index 788a0ef6323aef6e2784244b1825c5b72965abc5..7985fa12b9b46c8c3f6da328f578a76b6267a66e 100644
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
        DSSDBG("rfbi_runtime_put\n");
 
        r = pm_runtime_put_sync(&rfbi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 void rfbi_bus_lock(void)
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
                u16 height, void (*callback)(void *data), void *data)
 {
        u32 l;
+       struct omap_video_timings timings = {
+               .hsw            = 1,
+               .hfp            = 1,
+               .hbp            = 1,
+               .vsw            = 1,
+               .vfp            = 0,
+               .vbp            = 0,
+               .x_res          = width,
+               .y_res          = height,
+       };
 
        /*BUG_ON(callback == 0);*/
        BUG_ON(rfbi.framedone_callback != NULL);
 
        DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
 
-       dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
+       dss_mgr_set_timings(dssdev->manager, &timings);
 
        dispc_mgr_enable(dssdev->manager->id, true);
 
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
                u16 *x, u16 *y, u16 *w, u16 *h)
 {
        u16 dw, dh;
+       struct omap_video_timings timings = {
+               .hsw            = 1,
+               .hfp            = 1,
+               .hbp            = 1,
+               .vsw            = 1,
+               .vfp            = 0,
+               .vbp            = 0,
+               .x_res          = *w,
+               .y_res          = *h,
+       };
 
        dssdev->driver->get_resolution(dssdev, &dw, &dh);
 
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
        if (*w == 0 || *h == 0)
                return -EINVAL;
 
-       dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
+       dss_mgr_set_timings(dssdev->manager, &timings);
 
        return 0;
 }
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(omap_rfbi_update);
 
-void rfbi_dump_regs(struct seq_file *s)
+static void rfbi_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
 
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_rfbi_display_disable);
 
-int rfbi_init_display(struct omap_dss_device *dssdev)
+static int __init rfbi_init_display(struct omap_dss_device *dssdev)
 {
        rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
        return 0;
 }
 
+static void __init rfbi_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
+                       continue;
+
+               r = rfbi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                               dssdev->name, r);
+       }
+}
+
 /* RFBI HW IP initialisation */
-static int omap_rfbihw_probe(struct platform_device *pdev)
+static int __init omap_rfbihw_probe(struct platform_device *pdev)
 {
        u32 rev;
        struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
 
        rfbi_runtime_put();
 
+       dss_debugfs_create_file("rfbi", rfbi_dump_regs);
+
+       rfbi_probe_pdata(pdev);
+
        return 0;
 
 err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
        return r;
 }
 
-static int omap_rfbihw_remove(struct platform_device *pdev)
+static int __exit omap_rfbihw_remove(struct platform_device *pdev)
 {
+       omap_dss_unregister_child_devices(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
 static int rfbi_runtime_suspend(struct device *dev)
 {
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
+               return r;
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
 };
 
 static struct platform_driver omap_rfbihw_driver = {
-       .probe          = omap_rfbihw_probe,
-       .remove         = omap_rfbihw_remove,
+       .remove         = __exit_p(omap_rfbihw_remove),
        .driver         = {
                .name   = "omapdss_rfbi",
                .owner  = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
        },
 };
 
-int rfbi_init_platform_driver(void)
+int __init rfbi_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_rfbihw_driver);
+       return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
 }
 
-void rfbi_uninit_platform_driver(void)
+void __exit rfbi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_rfbihw_driver);
+       platform_driver_unregister(&omap_rfbihw_driver);
 }
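Registration is also switched from platform_driver_register() to platform_driver_probe(): the probe callback is handed over at registration time, the already-registered devices are bound once, and late binding is disallowed afterwards. That is why .probe no longer appears in struct platform_driver and why omap_rfbihw_probe() and the display-init helpers it calls can be marked __init and discarded after boot; __exit_p() likewise compiles the remove callback out of built-in kernels. A bare skeleton of this probe-once pattern, using hypothetical names that are not part of omapdss:

/* sketch of the probe-once registration pattern used above */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* one-shot setup; this code is freed once init completes */
	return 0;
}

static int __exit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	/* no .probe member: the callback goes to platform_driver_probe() */
	.remove	= __exit_p(example_remove),
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

The SDI hunks further below apply the same conversion.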
index 8266ca0d666bc2de7547a12c38eca9dc98aba11a..3a43dc2a9b46c992b22770b163a5e47611b7bb9e 100644
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/export.h>
+#include <linux/platform_device.h>
 
 #include <video/omapdss.h>
 #include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        if (r)
                goto err_reg_enable;
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
                goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        }
 
 
-       dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+       dss_mgr_set_timings(dssdev->manager, t);
 
        r = dss_set_clock_div(&dss_cinfo);
        if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
 err_calc_clock_div:
        dispc_runtime_put();
 err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
        regulator_disable(sdi.vdds_sdi_reg);
 err_reg_enable:
        omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
        dss_sdi_disable();
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        regulator_disable(sdi.vdds_sdi_reg);
 
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_sdi_display_disable);
 
-int sdi_init_display(struct omap_dss_device *dssdev)
+static int __init sdi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("SDI init\n");
 
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-int sdi_init(void)
+static void __init sdi_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
+                       continue;
+
+               r = sdi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
+static int __init omap_sdi_probe(struct platform_device *pdev)
 {
+       sdi_probe_pdata(pdev);
+
+       return 0;
+}
+
+static int __exit omap_sdi_remove(struct platform_device *pdev)
+{
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        return 0;
 }
 
-void sdi_exit(void)
+static struct platform_driver omap_sdi_driver = {
+       .remove         = __exit_p(omap_sdi_remove),
+       .driver         = {
+               .name   = "omapdss_sdi",
+               .owner  = THIS_MODULE,
+       },
+};
+
+int __init sdi_init_platform_driver(void)
+{
+       return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+}
+
+void __exit sdi_uninit_platform_driver(void)
 {
+       platform_driver_unregister(&omap_sdi_driver);
 }
index 1f58b84d69015035c134bc5736b86a12bf7b46f8..e734cb444bc7ce30051adeb9e0f1ff69318bf296 100644
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
 
        void (*pll_disable)(struct hdmi_ip_data *ip_data);
 
-       void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+       int (*video_enable)(struct hdmi_ip_data *ip_data);
+
+       void (*video_disable)(struct hdmi_ip_data *ip_data);
 
        void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
 
        void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-       void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       int (*audio_enable)(struct hdmi_ip_data *ip_data);
+
+       void (*audio_disable)(struct hdmi_ip_data *ip_data);
+
+       int (*audio_start)(struct hdmi_ip_data *ip_data);
+
+       void (*audio_stop)(struct hdmi_ip_data *ip_data);
+
+       int (*audio_config)(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio);
 #endif
 
 };
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
 bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio);
 #endif
 #endif
index bfe6fe65c8becf4fa2bf0ed671612623effeeb19..4dae1b291079c9e8e00f0719aa22328f176934ab 100644
 #include <linux/string.h>
 #include <linux/seq_file.h>
 #include <linux/gpio.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
 
 #include "ti_hdmi_4xxx_ip.h"
 #include "dss.h"
+#include "dss_features.h"
 
 static inline void hdmi_write_reg(void __iomem *base_addr,
                                const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
        REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
 
        r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
-                       NULL, hpd_irq_handler,
-                       IRQF_DISABLED | IRQF_TRIGGER_RISING |
-                       IRQF_TRIGGER_FALLING, "hpd", ip_data);
+                                NULL, hpd_irq_handler,
+                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                IRQF_ONESHOT, "hpd", ip_data);
        if (r) {
                DSSERR("HPD IRQ request failed\n");
                hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
 
 }
 
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
+       return 0;
+}
+
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
 {
-       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
 }
 
 static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
 
 #define CORE_REG(i, name) name(i)
 #define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
-               hdmi_read_reg(hdmi_pll_base(ip_data), r))
-#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+               hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+               hdmi_read_reg(hdmi_av_base(ip_data), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
                (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
-               hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+               hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
 
        DUMPCORE(HDMI_CORE_SYS_VND_IDL);
        DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPCORE(HDMI_CORE_SYS_SRST);
        DUMPCORE(HDMI_CORE_CTRL1);
        DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+       DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+       DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+       DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+       DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+       DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+       DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+       DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
        DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
        DUMPCORE(HDMI_CORE_SYS_VID_MODE);
        DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPCORE(HDMI_CORE_SYS_INTR4);
        DUMPCORE(HDMI_CORE_SYS_UMASK1);
        DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
-       DUMPCORE(HDMI_CORE_SYS_DE_DLY);
-       DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
-       DUMPCORE(HDMI_CORE_SYS_DE_TOP);
-       DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
-       DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
-       DUMPCORE(HDMI_CORE_SYS_DE_LINL);
-       DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
 
-       DUMPCORE(HDMI_CORE_DDC_CMD);
-       DUMPCORE(HDMI_CORE_DDC_STATUS);
        DUMPCORE(HDMI_CORE_DDC_ADDR);
+       DUMPCORE(HDMI_CORE_DDC_SEGM);
        DUMPCORE(HDMI_CORE_DDC_OFFSET);
        DUMPCORE(HDMI_CORE_DDC_COUNT1);
        DUMPCORE(HDMI_CORE_DDC_COUNT2);
+       DUMPCORE(HDMI_CORE_DDC_STATUS);
+       DUMPCORE(HDMI_CORE_DDC_CMD);
        DUMPCORE(HDMI_CORE_DDC_DATA);
-       DUMPCORE(HDMI_CORE_DDC_SEGM);
 
-       DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-       DUMPCORE(HDMI_CORE_AV_DPD);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-       DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-       DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-       DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+       DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+       DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+       DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+       DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+       DUMPCOREAV(HDMI_CORE_AV_ASRC);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+       DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+       DUMPCOREAV(HDMI_CORE_AV_DPD);
+       DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+       DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
 
        for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
 
        for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
-
-       DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
-       DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL1);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL2);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL3);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
-       DUMPCORE(HDMI_CORE_AV_AUD_MODE);
-       DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
-       DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
-       DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
-       DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
-       DUMPCORE(HDMI_CORE_AV_ASRC);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
-       DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-       DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
-       DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
-       DUMPCORE(HDMI_CORE_AV_DPD);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-       DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-       DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-       DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
-       DUMPCORE(HDMI_CORE_AV_SPD_VERS);
-       DUMPCORE(HDMI_CORE_AV_SPD_LEN);
-       DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
-       DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
-       DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
-       DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
-       DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+               DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
 }
 
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
                                        struct hdmi_audio_format *aud_fmt)
 {
        u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
 }
 
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
                                        struct hdmi_audio_dma *aud_dma)
 {
        u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
 }
 
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
                                        struct hdmi_core_audio_config *cfg)
 {
        u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
        REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
                                                cfg->fs_override, 1, 1);
 
-       /* I2S parameters */
-       REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
-                                               cfg->freq_sample, 3, 0);
-
+       /*
+        * Set IEC-60958-3 channel status word. It is passed to the IP
+        * just as it is received. The user of the driver is responsible
+        * for its contents.
+        */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+                      cfg->iec60958_cfg->status[0]);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+                      cfg->iec60958_cfg->status[1]);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+                      cfg->iec60958_cfg->status[2]);
+       /* yes, this is correct: status[3] goes to CHST4 register */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+                      cfg->iec60958_cfg->status[3]);
+       /* yes, this is correct: status[4] goes to CHST5 register */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+                      cfg->iec60958_cfg->status[4]);
+
+       /* set I2S parameters */
        r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
-       r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
        r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
-       r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
        r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
-       r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
        r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
        r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
        r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
        hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
 
-       r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
-       r = FLD_MOD(r, cfg->freq_sample, 7, 4);
-       r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
-       r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
-
        REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
                        cfg->i2s_cfg.in_length_bits, 3, 0);
 
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
        r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
        r = FLD_MOD(r, cfg->en_spdif, 1, 1);
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+       /* Audio channel mappings */
+       /* TODO: Make channel mapping dynamic. For now, map channels
+        * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
+        * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+        */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+       REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
 }
 
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-               struct hdmi_core_infoframe_audio *info_aud)
+static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+               struct snd_cea_861_aud_if *info_aud)
 {
-       u8 val;
        u8 sum = 0, checksum = 0;
        void __iomem *av_base = hdmi_av_base(ip_data);
 
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
        sum += 0x84 + 0x001 + 0x00a;
 
-       val = (info_aud->db1_coding_type << 4)
-                       | (info_aud->db1_channel_count - 1);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+                      info_aud->db1_ct_cc);
+       sum += info_aud->db1_ct_cc;
 
-       val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+                      info_aud->db2_sf_ss);
+       sum += info_aud->db2_sf_ss;
 
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+       sum += info_aud->db3;
 
-       val = info_aud->db4_channel_alloc;
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+       sum += info_aud->db4_ca;
 
-       val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+                      info_aud->db5_dminh_lsv);
+       sum += info_aud->db5_dminh_lsv;
 
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
         */
 }
 
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-                               u32 sample_freq, u32 *n, u32 *cts)
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio)
 {
-       u32 r;
-       u32 deep_color = 0;
-       u32 pclk = ip_data->cfg.timings.pixel_clock;
-
-       if (n == NULL || cts == NULL)
+       struct hdmi_audio_format audio_format;
+       struct hdmi_audio_dma audio_dma;
+       struct hdmi_core_audio_config core;
+       int err, n, cts, channel_count;
+       unsigned int fs_nr;
+       bool word_length_16b = false;
+
+       if (!audio || !audio->iec || !audio->cea || !ip_data)
                return -EINVAL;
+
+       core.iec60958_cfg = audio->iec;
        /*
-        * Obtain current deep color configuration. This needed
-        * to calculate the TMDS clock based on the pixel clock.
+        * In the IEC-60958 status word, check if the audio sample word length
+        * is 16-bit as several optimizations can be performed in such case.
         */
-       r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
-       switch (r) {
-       case 1: /* No deep color selected */
-               deep_color = 100;
+       if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+               if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+                       word_length_16b = true;
+
+       /* I2S configuration. See Philips' specification */
+       if (word_length_16b)
+               core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+       else
+               core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+       /*
+        * The I2S input word length is twice the length given in the IEC-60958
+        * status word. If the word size is greater than
+        * 20 bits, increment by one.
+        */
+       core.i2s_cfg.in_length_bits = audio->iec->status[4]
+               & IEC958_AES4_CON_WORDLEN;
+       if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+               core.i2s_cfg.in_length_bits++;
+       core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+       core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+       core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+       core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+       /* convert sample frequency to a number */
+       switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+       case IEC958_AES3_CON_FS_32000:
+               fs_nr = 32000;
+               break;
+       case IEC958_AES3_CON_FS_44100:
+               fs_nr = 44100;
+               break;
+       case IEC958_AES3_CON_FS_48000:
+               fs_nr = 48000;
                break;
-       case 2: /* 10-bit deep color selected */
-               deep_color = 125;
+       case IEC958_AES3_CON_FS_88200:
+               fs_nr = 88200;
                break;
-       case 3: /* 12-bit deep color selected */
-               deep_color = 150;
+       case IEC958_AES3_CON_FS_96000:
+               fs_nr = 96000;
+               break;
+       case IEC958_AES3_CON_FS_176400:
+               fs_nr = 176400;
+               break;
+       case IEC958_AES3_CON_FS_192000:
+               fs_nr = 192000;
                break;
        default:
                return -EINVAL;
        }
 
-       switch (sample_freq) {
-       case 32000:
-               if ((deep_color == 125) && ((pclk == 54054)
-                               || (pclk == 74250)))
-                       *n = 8192;
-               else
-                       *n = 4096;
+       err = hdmi_compute_acr(fs_nr, &n, &cts);
+
+       /* Audio clock regeneration settings */
+       core.n = n;
+       core.cts = cts;
+       if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+               core.aud_par_busclk = 0;
+               core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+               core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+       } else {
+               core.aud_par_busclk = (((128 * 31) - 1) << 8);
+               core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+               core.use_mclk = true;
+       }
+
+       if (core.use_mclk)
+               core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+       /* Audio channels settings */
+       channel_count = (audio->cea->db1_ct_cc &
+                        CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
+       switch (channel_count) {
+       case 2:
+               audio_format.active_chnnls_msk = 0x03;
+               break;
+       case 3:
+               audio_format.active_chnnls_msk = 0x07;
+               break;
+       case 4:
+               audio_format.active_chnnls_msk = 0x0f;
+               break;
+       case 5:
+               audio_format.active_chnnls_msk = 0x1f;
                break;
-       case 44100:
-               *n = 6272;
+       case 6:
+               audio_format.active_chnnls_msk = 0x3f;
                break;
-       case 48000:
-               if ((deep_color == 125) && ((pclk == 54054)
-                               || (pclk == 74250)))
-                       *n = 8192;
-               else
-                       *n = 6144;
+       case 7:
+               audio_format.active_chnnls_msk = 0x7f;
+               break;
+       case 8:
+               audio_format.active_chnnls_msk = 0xff;
                break;
        default:
-               *n = 0;
                return -EINVAL;
        }
 
-       /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
-       *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+       /*
+        * the HDMI IP needs to enable four stereo channels when transmitting
+        * more than 2 audio channels
+        */
+       if (channel_count == 2) {
+               audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+               core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+               core.layout = HDMI_AUDIO_LAYOUT_2CH;
+       } else {
+               audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+               core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+                               HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+                               HDMI_AUDIO_I2S_SD3_EN;
+               core.layout = HDMI_AUDIO_LAYOUT_8CH;
+       }
+
+       core.en_spdif = false;
+       /* use sample frequency from channel status word */
+       core.fs_override = true;
+       /* enable ACR packets */
+       core.en_acr_pkt = true;
+       /* disable direct streaming digital audio */
+       core.en_dsd_audio = false;
+       /* use parallel audio interface */
+       core.en_parallel_aud_input = true;
+
+       /* DMA settings */
+       if (word_length_16b)
+               audio_dma.transfer_size = 0x10;
+       else
+               audio_dma.transfer_size = 0x20;
+       audio_dma.block_size = 0xC0;
+       audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+       audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+       /* audio FIFO format settings */
+       if (word_length_16b) {
+               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+               audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+               audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+       } else {
+               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+               audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+               audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+       }
+       audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+       audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+       /* disable start/stop signals of IEC 60958 blocks */
+       audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+       /* configure DMA and audio FIFO format */
+       ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
+       ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+
+       /* configure the core */
+       ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+
+       /* configure CEA 861 audio infoframe */
+       ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
 
        return 0;
 }
 
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data),
+                   HDMI_WP_AUDIO_CTRL, true, 31, 31);
+       return 0;
+}
+
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data),
+                   HDMI_WP_AUDIO_CTRL, false, 31, 31);
+}
+
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
 {
        REG_FLD_MOD(hdmi_av_base(ip_data),
-                               HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+                   HDMI_CORE_AV_AUD_MODE, true, 0, 0);
        REG_FLD_MOD(hdmi_wp_base(ip_data),
-                               HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+                   HDMI_WP_AUDIO_CTRL, true, 30, 30);
+       return 0;
+}
+
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_av_base(ip_data),
+                   HDMI_CORE_AV_AUD_MODE, false, 0, 0);
        REG_FLD_MOD(hdmi_wp_base(ip_data),
-                               HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+                   HDMI_WP_AUDIO_CTRL, false, 30, 30);
 }
 #endif
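For audio clock regeneration, the removed hdmi_config_audio_acr() is replaced by a call to hdmi_compute_acr(), whose body is not part of this hunk. The values involved follow the HDMI specification: N is a per-sample-rate constant (4096 for 32 kHz, 6272 for 44.1 kHz, 6144 for 48 kHz in the removed code above) and CTS = (f_TMDS * N) / (128 * fs). A small standalone sketch with those constants, assuming no deep color so the TMDS clock equals the pixel clock (hdmi_compute_acr() is expected to produce equivalent values, but this is only an illustration):

#include <stdio.h>

static int compute_acr(unsigned int fs, unsigned long long tmds_hz,
		       unsigned int *n, unsigned int *cts)
{
	switch (fs) {
	case 32000: *n = 4096; break;
	case 44100: *n = 6272; break;
	case 48000: *n = 6144; break;
	default:    return -1;	/* other rates need the spec's table lookups */
	}
	/* HDMI spec: CTS = (f_TMDS * N) / (128 * fs) */
	*cts = (unsigned int)((tmds_hz * *n) / (128ULL * fs));
	return 0;
}

int main(void)
{
	unsigned int n, cts;

	if (!compute_acr(48000, 74250000ULL, &n, &cts))
		printf("48 kHz @ 74.25 MHz: N=%u, CTS=%u\n", n, cts);	/* N=6144, CTS=74250 */
	return 0;
}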
index a14d1a0e6e4146d00a70a88645d7292ea1793288..8366ae19e82eece140606c7178e6a70216fbec87 100644
 #include <linux/string.h>
 #include <video/omapdss.h>
 #include "ti_hdmi.h"
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
 
 /* HDMI Wrapper */
 
 #define HDMI_CORE_SYS_SRST                     0x14
 #define HDMI_CORE_CTRL1                                0x20
 #define HDMI_CORE_SYS_SYS_STAT                 0x24
+#define HDMI_CORE_SYS_DE_DLY                   0xC8
+#define HDMI_CORE_SYS_DE_CTRL                  0xCC
+#define HDMI_CORE_SYS_DE_TOP                   0xD0
+#define HDMI_CORE_SYS_DE_CNTL                  0xD8
+#define HDMI_CORE_SYS_DE_CNTH                  0xDC
+#define HDMI_CORE_SYS_DE_LINL                  0xE0
+#define HDMI_CORE_SYS_DE_LINH_1                        0xE4
 #define HDMI_CORE_SYS_VID_ACEN                 0x124
 #define HDMI_CORE_SYS_VID_MODE                 0x128
 #define HDMI_CORE_SYS_INTR_STATE               0x1C0
 #define HDMI_CORE_SYS_INTR4                    0x1D0
 #define HDMI_CORE_SYS_UMASK1                   0x1D4
 #define HDMI_CORE_SYS_TMDS_CTRL                        0x208
-#define HDMI_CORE_SYS_DE_DLY                   0xC8
-#define HDMI_CORE_SYS_DE_CTRL                  0xCC
-#define HDMI_CORE_SYS_DE_TOP                   0xD0
-#define HDMI_CORE_SYS_DE_CNTL                  0xD8
-#define HDMI_CORE_SYS_DE_CNTH                  0xDC
-#define HDMI_CORE_SYS_DE_LINL                  0xE0
-#define HDMI_CORE_SYS_DE_LINH_1                        0xE4
+
 #define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC        0x1
 #define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC        0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS          0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS  0x1
 #define HDMI_CORE_CTRL1_EDGE_RISINGEDGE        0x1
 
 /* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD                      0x3CC
-#define HDMI_CORE_DDC_STATUS                   0x3C8
 #define HDMI_CORE_DDC_ADDR                     0x3B4
+#define HDMI_CORE_DDC_SEGM                     0x3B8
 #define HDMI_CORE_DDC_OFFSET                   0x3BC
 #define HDMI_CORE_DDC_COUNT1                   0x3C0
 #define HDMI_CORE_DDC_COUNT2                   0x3C4
+#define HDMI_CORE_DDC_STATUS                   0x3C8
+#define HDMI_CORE_DDC_CMD                      0x3CC
 #define HDMI_CORE_DDC_DATA                     0x3D0
-#define HDMI_CORE_DDC_SEGM                     0x3B8
 
 /* HDMI IP Core Audio Video */
 
-#define HDMI_CORE_AV_HDMI_CTRL                 0xBC
-#define HDMI_CORE_AV_DPD                       0xF4
-#define HDMI_CORE_AV_PB_CTRL1                  0xF8
-#define HDMI_CORE_AV_PB_CTRL2                  0xFC
-#define HDMI_CORE_AV_AVI_TYPE                  0x100
-#define HDMI_CORE_AV_AVI_VERS                  0x104
-#define HDMI_CORE_AV_AVI_LEN                   0x108
-#define HDMI_CORE_AV_AVI_CHSUM                 0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n)              (n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS          15
-#define HDMI_CORE_AV_SPD_DBYTE(n)              (n * 4 + 0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS          27
-#define HDMI_CORE_AV_AUD_DBYTE(n)              (n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS          10
-#define HDMI_CORE_AV_MPEG_DBYTE(n)             (n * 4 + 0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS         27
-#define HDMI_CORE_AV_GEN_DBYTE(n)              (n * 4 + 0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS          31
-#define HDMI_CORE_AV_GEN2_DBYTE(n)             (n * 4 + 0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS         31
 #define HDMI_CORE_AV_ACR_CTRL                  0x4
 #define HDMI_CORE_AV_FREQ_SVAL                 0x8
 #define HDMI_CORE_AV_N_SVAL1                   0xC
 #define HDMI_CORE_AV_AVI_VERS                  0x104
 #define HDMI_CORE_AV_AVI_LEN                   0x108
 #define HDMI_CORE_AV_AVI_CHSUM                 0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n)              (n * 4 + 0x110)
 #define HDMI_CORE_AV_SPD_TYPE                  0x180
 #define HDMI_CORE_AV_SPD_VERS                  0x184
 #define HDMI_CORE_AV_SPD_LEN                   0x188
 #define HDMI_CORE_AV_SPD_CHSUM                 0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n)              (n * 4 + 0x190)
 #define HDMI_CORE_AV_AUDIO_TYPE                        0x200
 #define HDMI_CORE_AV_AUDIO_VERS                        0x204
 #define HDMI_CORE_AV_AUDIO_LEN                 0x208
 #define HDMI_CORE_AV_AUDIO_CHSUM               0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n)              (n * 4 + 0x210)
 #define HDMI_CORE_AV_MPEG_TYPE                 0x280
 #define HDMI_CORE_AV_MPEG_VERS                 0x284
 #define HDMI_CORE_AV_MPEG_LEN                  0x288
 #define HDMI_CORE_AV_MPEG_CHSUM                        0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n)             (n * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n)              (n * 4 + 0x300)
 #define HDMI_CORE_AV_CP_BYTE1                  0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n)             (n * 4 + 0x380)
 #define HDMI_CORE_AV_CEC_ADDR_ID               0x3FC
+
 #define HDMI_CORE_AV_SPD_DBYTE_ELSIZE          0x4
 #define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE         0x4
 #define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE         0x4
 #define HDMI_CORE_AV_GEN_DBYTE_ELSIZE          0x4
 
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS          15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS          27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS          10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS         27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS          31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS         31
+
 /* PLL */
 
 #define PLLCTRL_PLL_CONTROL                    0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
        HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
        HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
        HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-       HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
-       HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
-       HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
-       HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
-       HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
-       HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
-       HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
-       HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
-       HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
-       HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
-       HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
-       HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
-       HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
-       HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
-       HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
-       HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
-       HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
-       HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
 };
 
 enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
        HDMI_PACK_ALREADYPACKED = 7
 };
 
-enum hdmi_core_audio_sample_freq {
-       HDMI_AUDIO_FS_32000 = 0x3,
-       HDMI_AUDIO_FS_44100 = 0x0,
-       HDMI_AUDIO_FS_48000 = 0x2,
-       HDMI_AUDIO_FS_88200 = 0x8,
-       HDMI_AUDIO_FS_96000 = 0xA,
-       HDMI_AUDIO_FS_176400 = 0xC,
-       HDMI_AUDIO_FS_192000 = 0xE,
-       HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
 enum hdmi_core_audio_layout {
        HDMI_AUDIO_LAYOUT_2CH = 0,
        HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
 };
 
 enum hdmi_audio_i2s_config {
-       HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
-       HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
        HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
        HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
-       HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
-       HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
-       HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
-       HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
-       HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
-       HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
-       HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
-       HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
-       HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
-       HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
        HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
        HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
        HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
        HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
        HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
        HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
        HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
        enum hdmi_core_tclkselclkmult   tclk_sel_clkmult;
 };
 
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
-       u8 db1_coding_type;
-       u8 db1_channel_count;
-       u8 db2_sample_freq;
-       u8 db2_sample_size;
-       u8 db4_channel_alloc;
-       bool db5_downmix_inh;
-       u8 db5_lsv;     /* Level shift values for downmix */
-};
-
 struct hdmi_core_packet_enable_repeat {
        u32     audio_pkt;
        u32     audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
 };
 
 struct hdmi_core_audio_i2s_config {
-       u8 word_max_length;
-       u8 word_length;
        u8 in_length_bits;
        u8 justification;
-       u8 en_high_bitrate_aud;
        u8 sck_edge_mode;
-       u8 cbit_order;
        u8 vbit;
-       u8 ws_polarity;
        u8 direction;
        u8 shift;
        u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
 
 struct hdmi_core_audio_config {
        struct hdmi_core_audio_i2s_config       i2s_cfg;
-       enum hdmi_core_audio_sample_freq        freq_sample;
+       struct snd_aes_iec958                   *iec60958_cfg;
        bool                                    fs_override;
        u32                                     n;
        u32                                     cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
        bool                                    en_spdif;
 };
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-                               u32 sample_freq, u32 *n, u32 *cts);
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-               struct hdmi_core_infoframe_audio *info_aud);
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_core_audio_config *cfg);
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_audio_dma *aud_dma);
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_audio_format *aud_fmt);
-#endif
 #endif
index 9c3daf71750c769057d3a03f05019e950a3b5ed8..3907c8b6ecbca991e3cc9313c1a1473c247cb216 100644 (file)
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
        DSSDBG("venc_runtime_put\n");
 
        r = pm_runtime_put_sync(&venc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static const struct venc_config *venc_timings_to_config(
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
                return &venc_config_ntsc_trm;
 
        BUG();
+       return NULL;
 }
 
 static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
 
        venc_write_reg(VENC_OUTPUT_CONTROL, l);
 
-       dispc_set_digit_size(dssdev->panel.timings.x_res,
-                       dssdev->panel.timings.y_res/2);
+       dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-       regulator_enable(venc.vdda_dac_reg);
+       r = regulator_enable(venc.vdda_dac_reg);
+       if (r)
+               goto err;
 
        if (dssdev->platform_enable)
                dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
        return 13500000;
 }
 
+static ssize_t display_output_type_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       const char *ret;
+
+       switch (dssdev->phy.venc.type) {
+       case OMAP_DSS_VENC_TYPE_COMPOSITE:
+               ret = "composite";
+               break;
+       case OMAP_DSS_VENC_TYPE_SVIDEO:
+               ret = "svideo";
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_output_type_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       enum omap_dss_venc_type new_type;
+
+       if (sysfs_streq("composite", buf))
+               new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+       else if (sysfs_streq("svideo", buf))
+               new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+       else
+               return -EINVAL;
+
+       mutex_lock(&venc.venc_lock);
+
+       if (dssdev->phy.venc.type != new_type) {
+               dssdev->phy.venc.type = new_type;
+               if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+                       venc_power_off(dssdev);
+                       venc_power_on(dssdev);
+               }
+       }
+
+       mutex_unlock(&venc.venc_lock);
+
+       return size;
+}
+
+static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
+               display_output_type_show, display_output_type_store);
+
 /* driver */
 static int venc_panel_probe(struct omap_dss_device *dssdev)
 {
        dssdev->panel.timings = omap_dss_pal_timings;
 
-       return 0;
+       return device_create_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static void venc_panel_remove(struct omap_dss_device *dssdev)
 {
+       device_remove_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
        return venc_panel_enable(dssdev);
 }
 
-static void venc_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void venc_set_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
                /* turn the venc off and on to get new timings to use */
                venc_panel_disable(dssdev);
                venc_panel_enable(dssdev);
+       } else {
+               dss_mgr_set_timings(dssdev->manager, timings);
        }
 }
 
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
        .get_resolution = omapdss_default_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = venc_get_timings,
        .set_timings    = venc_set_timings,
        .check_timings  = venc_check_timings,
 
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
 };
 /* driver end */
 
-int venc_init_display(struct omap_dss_device *dssdev)
+static int __init venc_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-void venc_dump_regs(struct seq_file *s)
+static void venc_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
 
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
                clk_put(venc.tv_dac_clk);
 }
 
+static void __init venc_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int r, i;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
+                       continue;
+
+               r = venc_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
 /* VENC HW IP initialisation */
-static int omap_venchw_probe(struct platform_device *pdev)
+static int __init omap_venchw_probe(struct platform_device *pdev)
 {
        u8 rev_id;
        struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
        if (r)
                goto err_reg_panel_driver;
 
+       dss_debugfs_create_file("venc", venc_dump_regs);
+
+       venc_probe_pdata(pdev);
+
        return 0;
 
 err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
        return r;
 }
 
-static int omap_venchw_remove(struct platform_device *pdev)
+static int __exit omap_venchw_remove(struct platform_device *pdev)
 {
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        if (venc.vdda_dac_reg != NULL) {
                regulator_put(venc.vdda_dac_reg);
                venc.vdda_dac_reg = NULL;
        }
+
        omap_dss_unregister_driver(&venc_driver);
 
        pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
                clk_disable(venc.tv_dac_clk);
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
+               return r;
 
        if (venc.tv_dac_clk)
                clk_enable(venc.tv_dac_clk);
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
 };
 
 static struct platform_driver omap_venchw_driver = {
-       .probe          = omap_venchw_probe,
-       .remove         = omap_venchw_remove,
+       .remove         = __exit_p(omap_venchw_remove),
        .driver         = {
                .name   = "omapdss_venc",
                .owner  = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
        },
 };
 
-int venc_init_platform_driver(void)
+int __init venc_init_platform_driver(void)
 {
        if (cpu_is_omap44xx())
                return 0;
 
-       return platform_driver_register(&omap_venchw_driver);
+       return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
 }
 
-void venc_uninit_platform_driver(void)
+void __exit venc_uninit_platform_driver(void)
 {
        if (cpu_is_omap44xx())
                return;
 
-       return platform_driver_unregister(&omap_venchw_driver);
+       platform_driver_unregister(&omap_venchw_driver);
 }
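
The two venc.c hunks above move the probe path into init memory: omap_venchw_probe() and venc_init_display() become __init, the .probe member is dropped from the platform_driver, and registration switches from platform_driver_register() to platform_driver_probe(). A minimal sketch of that pattern follows; the "foo" driver name and callbacks are hypothetical, only the platform_driver_probe()/__exit_p() usage mirrors the change.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* lives in .init.text and is discarded once boot-time probing is done */
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	/* no .probe here: platform_driver_probe() supplies it at registration */
	.remove	= __exit_p(foo_remove),
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	/* probe is passed separately so it may stay __init */
	return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

MODULE_LICENSE("GPL");

The trade-off is that platform_driver_probe() only binds devices already registered at that point; late device hotplug is not supported, which fits a DSS block whose child devices are registered from the probe itself.
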
index 6a09ef87e14fae9886461bb7855c9d97350707e0..c6cf372d22c58b9941051c67028df365e178657f 100644 (file)
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 
        DBG("omapfb_setup_plane\n");
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                r = -EINVAL;
                goto out;
        }
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 {
        struct omapfb_info *ofbi = FB2OFB(fbi);
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                memset(pi, 0, sizeof(*pi));
        } else {
                struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
        down_write_nested(&rg->lock, rg->id);
        atomic_inc(&rg->lock_count);
 
+       if (rg->size == size && rg->type == mi->type)
+               goto out;
+
        if (atomic_read(&rg->map_count)) {
                r = -EBUSY;
                goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
                }
        }
 
-       if (rg->size != size || rg->type != mi->type) {
-               r = omapfb_realloc_fbmem(fbi, size, mi->type);
-               if (r) {
-                       dev_err(fbdev->dev, "realloc fbmem failed\n");
-                       goto out;
-               }
+       r = omapfb_realloc_fbmem(fbi, size, mi->type);
+       if (r) {
+               dev_err(fbdev->dev, "realloc fbmem failed\n");
+               goto out;
        }
 
  out:
index b00db4068d21c0280631fb1ca5816d4a499501f0..3450ea0966c97e6227145f3ad484f4850f0125ff 100644 (file)
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
                break;
        default:
                BUG();
+               return 0;
        }
 
        offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
 
                fbnum = simple_strtoul(p, &p, 10);
 
-               if (p == param)
+               if (p == start)
                        return -EINVAL;
 
                if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
        return 0;
 }
 
-static int omapfb_probe(struct platform_device *pdev)
+static int __init omapfb_probe(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = NULL;
        int r = 0;
@@ -2448,7 +2449,7 @@ err0:
        return r;
 }
 
-static int omapfb_remove(struct platform_device *pdev)
+static int __exit omapfb_remove(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
 
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omapfb_driver = {
-       .probe          = omapfb_probe,
-       .remove         = omapfb_remove,
+       .remove         = __exit_p(omapfb_remove),
        .driver         = {
                .name   = "omapfb",
                .owner  = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
 {
        DBG("omapfb_init\n");
 
-       if (platform_driver_register(&omapfb_driver)) {
+       if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
                printk(KERN_ERR "failed to register omapfb driver\n");
                return -ENODEV;
        }
index c0bdc9b54ecf77d1f22355ca956b6367d2ceac70..30361a09aecdd231c19d40241733d1aa873e1a79 100644 (file)
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
 
        /* This should never happen */
        BUG();
+       return NULL;
 }
 
 static inline void omapfb_lock(struct omapfb2_device *fbdev)
index 4e5b960c32c88bbcab1ec7e31c1f1eb7772898d7..7e990220ad2a6f52a25ff861f754d86e447a8358 100644 (file)
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
                pixel_size_exp = 2;
        else if (bytespp == 2)
                pixel_size_exp = 1;
-       else
+       else {
                BUG();
+               return;
+       }
 
        vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
        vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
index 1d71c08a818f7d1fc646496f9ae9c32d77943c88..0b4ae0cebedaf7c0a6b9c8f3d262d7917ecf8911 100644 (file)
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
                ret = wait_event_interruptible_timeout(priv->wait_idle,
                                        !priv->shared->hw_running, HZ*4);
 
-               if (ret < 0)
+               if (ret != 0)
                        break;
 
-               if (ret > 0)
-                       continue;
-
                if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
                    priv->shared->num_interrupts == num) {
                        QERROR("TIMEOUT");
index f3105160bf9829104d22ec4c9e2b24c063debeaa..ea7b661e7229039c29770c50414f4846e6865192 100644 (file)
@@ -47,7 +47,7 @@
 #ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
 #undef writel
 #define writel(v, r) do { \
-       printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \
+       pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
        __raw_writel(v, r); \
 } while (0)
 #endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
        result = (unsigned int)tmp / 1000;
 
        dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
-               pixclk, clk, result, clk / result);
+               pixclk, clk, result, result ? clk / result : clk);
 
        return result;
 }
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
        u32 alpha = 0;
        u32 data;
        u32 pagewidth;
-       int clkdiv;
 
        dev_dbg(sfb->dev, "setting framebuffer parameters\n");
 
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
        /* disable the window whilst we update it */
        writel(0, regs + WINCON(win_no));
 
-       /* use platform specified window as the basis for the lcd timings */
-
-       if (win_no == sfb->pdata->default_win) {
-               clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
-
-               data = sfb->pdata->vidcon0;
-               data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
-               if (clkdiv > 1)
-                       data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
-               else
-                       data &= ~VIDCON0_CLKDIR;        /* 1:1 clock */
-
-               /* write the timing data to the panel */
-
-               if (sfb->variant.is_2443)
-                       data |= (1 << 5);
-
-               writel(data, regs + VIDCON0);
-
+       if (!sfb->output_on)
                s3c_fb_enable(sfb, 1);
 
-               data = VIDTCON0_VBPD(var->upper_margin - 1) |
-                      VIDTCON0_VFPD(var->lower_margin - 1) |
-                      VIDTCON0_VSPW(var->vsync_len - 1);
-
-               writel(data, regs + sfb->variant.vidtcon);
-
-               data = VIDTCON1_HBPD(var->left_margin - 1) |
-                      VIDTCON1_HFPD(var->right_margin - 1) |
-                      VIDTCON1_HSPW(var->hsync_len - 1);
-
-               /* VIDTCON1 */
-               writel(data, regs + sfb->variant.vidtcon + 4);
-
-               data = VIDTCON2_LINEVAL(var->yres - 1) |
-                      VIDTCON2_HOZVAL(var->xres - 1) |
-                      VIDTCON2_LINEVAL_E(var->yres - 1) |
-                      VIDTCON2_HOZVAL_E(var->xres - 1);
-               writel(data, regs + sfb->variant.vidtcon + 8);
-       }
-
        /* write the buffer address */
 
        /* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
        struct s3c_fb *sfb = win->parent;
        unsigned int index = win->index;
        u32 wincon;
+       u32 output_on = sfb->output_on;
 
        dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
 
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
 
        shadow_protect_win(win, 1);
        writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
-       shadow_protect_win(win, 0);
 
        /* Check the enabled state to see if we need to be running the
         * main LCD interface, as if there are no active windows then
         * it is highly likely that we also do not need to output
         * anything.
         */
-
-       /* We could do something like the following code, but the current
-        * system of using framebuffer events means that we cannot make
-        * the distinction between just window 0 being inactive and all
-        * the windows being down.
-        *
-        * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
-       */
-
-       /* we're stuck with this until we can do something about overriding
-        * the power control using the blanking event for a single fb.
-        */
-       if (index == sfb->pdata->default_win) {
-               shadow_protect_win(win, 1);
-               s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
-               shadow_protect_win(win, 0);
-       }
+       s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
+       shadow_protect_win(win, 0);
 
        pm_runtime_put_sync(sfb->dev);
 
-       return 0;
+       return output_on == sfb->output_on;
 }
 
 /**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
  *
  * Calculate the pixel clock when none has been given through platform data.
  */
-static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
 {
        u64 pixclk = 1000000000000ULL;
        u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
 
        dev_dbg(sfb->dev, "allocating memory for display\n");
 
-       real_size = windata->win_mode.xres * windata->win_mode.yres;
+       real_size = windata->xres * windata->yres;
        virt_size = windata->virtual_x * windata->virtual_y;
 
        dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
-               real_size, windata->win_mode.xres, windata->win_mode.yres,
+               real_size, windata->xres, windata->yres,
                virt_size, windata->virtual_x, windata->virtual_y);
 
        size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
                                      struct s3c_fb_win **res)
 {
        struct fb_var_screeninfo *var;
-       struct fb_videomode *initmode;
+       struct fb_videomode initmode;
        struct s3c_fb_pd_win *windata;
        struct s3c_fb_win *win;
        struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        }
 
        windata = sfb->pdata->win[win_no];
-       initmode = &windata->win_mode;
+       initmode = *sfb->pdata->vtiming;
 
        WARN_ON(windata->max_bpp == 0);
-       WARN_ON(windata->win_mode.xres == 0);
-       WARN_ON(windata->win_mode.yres == 0);
+       WARN_ON(windata->xres == 0);
+       WARN_ON(windata->yres == 0);
 
        win = fbinfo->par;
        *res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        }
 
        /* setup the initial video mode from the window */
-       fb_videomode_to_var(&fbinfo->var, initmode);
+       initmode.xres = windata->xres;
+       initmode.yres = windata->yres;
+       fb_videomode_to_var(&fbinfo->var, &initmode);
 
        fbinfo->fix.type        = FB_TYPE_PACKED_PIXELS;
        fbinfo->fix.accel       = FB_ACCEL_NONE;
@@ -1338,6 +1285,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        return 0;
 }
 
+/**
+ * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
+ * @sfb: The base resources for the hardware.
+ *
+ * Set horizontal and vertical lcd rgb interface timing.
+ */
+static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
+{
+       struct fb_videomode *vmode = sfb->pdata->vtiming;
+       void __iomem *regs = sfb->regs;
+       int clkdiv;
+       u32 data;
+
+       if (!vmode->pixclock)
+               s3c_fb_missing_pixclock(vmode);
+
+       clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
+
+       data = sfb->pdata->vidcon0;
+       data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+       if (clkdiv > 1)
+               data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
+       else
+               data &= ~VIDCON0_CLKDIR;        /* 1:1 clock */
+
+       if (sfb->variant.is_2443)
+               data |= (1 << 5);
+       writel(data, regs + VIDCON0);
+
+       data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
+              VIDTCON0_VFPD(vmode->lower_margin - 1) |
+              VIDTCON0_VSPW(vmode->vsync_len - 1);
+       writel(data, regs + sfb->variant.vidtcon);
+
+       data = VIDTCON1_HBPD(vmode->left_margin - 1) |
+              VIDTCON1_HFPD(vmode->right_margin - 1) |
+              VIDTCON1_HSPW(vmode->hsync_len - 1);
+       writel(data, regs + sfb->variant.vidtcon + 4);
+
+       data = VIDTCON2_LINEVAL(vmode->yres - 1) |
+              VIDTCON2_HOZVAL(vmode->xres - 1) |
+              VIDTCON2_LINEVAL_E(vmode->yres - 1) |
+              VIDTCON2_HOZVAL_E(vmode->xres - 1);
+       writel(data, regs + sfb->variant.vidtcon + 8);
+}
+
 /**
  * s3c_fb_clear_win() - clear hardware window registers.
  * @sfb: The base resources for the hardware.
@@ -1354,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
        writel(0, regs + VIDOSD_A(win, sfb->variant));
        writel(0, regs + VIDOSD_B(win, sfb->variant));
        writel(0, regs + VIDOSD_C(win, sfb->variant));
-       reg = readl(regs + SHADOWCON);
-       writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+       if (sfb->variant.has_shadowcon) {
+               reg = readl(sfb->regs + SHADOWCON);
+               reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+                       SHADOWCON_CHx_ENABLE(win) |
+                       SHADOWCON_CHx_LOCAL_ENABLE(win));
+               writel(reg, sfb->regs + SHADOWCON);
+       }
 }
 
 static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
                writel(0xffffff, regs + WKEYCON1);
        }
 
+       s3c_fb_set_rgb_timing(sfb);
+
        /* we have the register setup, start allocating framebuffers */
 
        for (win = 0; win < fbdrv->variant.nr_windows; win++) {
                if (!pd->win[win])
                        continue;
 
-               if (!pd->win[win]->win_mode.pixclock)
-                       s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
-
                ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
                                       &sfb->windows[win]);
                if (ret < 0) {
@@ -1564,6 +1563,8 @@ static int s3c_fb_suspend(struct device *dev)
        struct s3c_fb_win *win;
        int win_no;
 
+       pm_runtime_get_sync(sfb->dev);
+
        for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
                win = sfb->windows[win_no];
                if (!win)
@@ -1577,6 +1578,9 @@ static int s3c_fb_suspend(struct device *dev)
                clk_disable(sfb->lcd_clk);
 
        clk_disable(sfb->bus_clk);
+
+       pm_runtime_put_sync(sfb->dev);
+
        return 0;
 }
 
@@ -1589,6 +1593,8 @@ static int s3c_fb_resume(struct device *dev)
        int win_no;
        u32 reg;
 
+       pm_runtime_get_sync(sfb->dev);
+
        clk_enable(sfb->bus_clk);
 
        if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@ static int s3c_fb_resume(struct device *dev)
                shadow_protect_win(win, 0);
        }
 
+       s3c_fb_set_rgb_timing(sfb);
+
        /* restore framebuffers */
        for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
                win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@ static int s3c_fb_resume(struct device *dev)
                s3c_fb_set_par(win->fbinfo);
        }
 
+       pm_runtime_put_sync(sfb->dev);
+
        return 0;
 }
 #endif
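
The s3c-fb hunks above stop taking the display timing from each window's win_mode and instead program it once, from a single fb_videomode at pdata->vtiming, via the new s3c_fb_set_rgb_timing() helper (called from probe and again on resume). A hedged sketch of board platform data under this scheme; the board names and values are invented, and the s3c_fb_platdata/.vtiming field layout is assumed from the references in the hunks (pdata->vtiming, windata->xres/yres).

#include <linux/fb.h>
#include <plat/fb.h>	/* s3c_fb_platdata; header path assumed for this tree */

static struct fb_videomode demo_lcd_timing = {
	.left_margin	= 8,	/* horizontal back porch */
	.right_margin	= 13,	/* horizontal front porch */
	.upper_margin	= 7,	/* vertical back porch */
	.lower_margin	= 5,	/* vertical front porch */
	.hsync_len	= 3,
	.vsync_len	= 1,
	.xres		= 800,
	.yres		= 480,
};

static struct s3c_fb_pd_win demo_fb_win0 = {
	.xres		= 800,	/* window size only, no longer a full win_mode */
	.yres		= 480,
	.max_bpp	= 32,
	.default_bpp	= 24,
};

static struct s3c_fb_platdata demo_lcd_pdata = {
	.win[0]		= &demo_fb_win0,
	.vtiming	= &demo_lcd_timing,	/* one timing for the RGB interface */
};
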
index cee7803a0a1c74fe09c1129f5edc927e58dfe86f..f3d3b9ce4751315afbfe15d6fc9c4b0b5a07ae3d 100644 (file)
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par  *par, struct savage_reg *r
        /* following part not present in X11 driver */
        cr67 = vga_in8(0x3d5, par) & 0xf;
        vga_out8(0x3d5, 0x50 | cr67, par);
-       udelay(10000);
+       mdelay(10);
        vga_out8(0x3d4, 0x67, par);
        /* end of part */
        vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x66, par);
        cr66 = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr66 | 0x02, par);
-       udelay(10000);
+       mdelay(10);
 
        vga_out8(0x3d4, 0x66, par);
        vga_out8(0x3d5, cr66 & ~0x02, par);     /* clear reset flag */
-       udelay(10000);
+       mdelay(10);
 
 
        /*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x3f, par);
        cr3f = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr3f | 0x08, par);
-       udelay(10000);
+       mdelay(10);
 
        vga_out8(0x3d4, 0x3f, par);
        vga_out8(0x3d5, cr3f & ~0x08, par);     /* clear reset flags */
-       udelay(10000);
+       mdelay(10);
 
        /* Savage ramdac speeds */
        par->numClocks = 4;
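
The savagefb changes above replace udelay(10000) with mdelay(10): udelay() is meant for short, microsecond-scale busy waits and very large arguments can misbehave on some architectures, so millisecond pauses belong to mdelay(), or msleep() where sleeping is allowed. A tiny illustrative helper, with a made-up name:

#include <linux/delay.h>
#include <linux/types.h>

static void demo_settle(bool may_sleep)
{
	if (may_sleep)
		msleep(10);	/* 10 ms sleep, preferred when the caller can sleep */
	else
		mdelay(10);	/* 10 ms busy-wait, the direct replacement for udelay(10000) */
}
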
index eafb19da2c0783d92a12ea95e3104d1e6d591d66..930e550e752ac5712b99dc4909de57d0ccd7fafd 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "sh_mobile_lcdcfb.h"
 
+/* HDMI Core Control Register (HTOP0) */
 #define HDMI_SYSTEM_CTRL                       0x00 /* System control */
 #define HDMI_L_R_DATA_SWAP_CTRL_RPKT           0x01 /* L/R data swap control,
                                                        bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
 #define HDMI_REVISION_ID                       0xF1 /* Revision ID */
 #define HDMI_TEST_MODE                         0xFE /* Test mode */
 
+/* HDMI Control Register (HTOP1) */
+#define HDMI_HTOP1_TEST_MODE                   0x0000 /* Test mode */
+#define HDMI_HTOP1_VIDEO_INPUT                 0x0008 /* VideoInput */
+#define HDMI_HTOP1_CORE_RSTN                   0x000C /* CoreResetn */
+#define HDMI_HTOP1_PLLBW                       0x0018 /* PLLBW */
+#define HDMI_HTOP1_CLK_TO_PHY                  0x001C /* Clk to Phy */
+#define HDMI_HTOP1_VIDEO_INPUT2                        0x0020 /* VideoInput2 */
+#define HDMI_HTOP1_TISEMP0_1                   0x0024 /* tisemp0-1 */
+#define HDMI_HTOP1_TISEMP2_C                   0x0028 /* tisemp2-c */
+#define HDMI_HTOP1_TISIDRV                     0x002C /* tisidrv */
+#define HDMI_HTOP1_TISEN                       0x0034 /* tisen */
+#define HDMI_HTOP1_TISDREN                     0x0038 /* tisdren  */
+#define HDMI_HTOP1_CISRANGE                    0x003C /* cisrange  */
+#define HDMI_HTOP1_ENABLE_SELECTOR             0x0040 /* Enable Selector */
+#define HDMI_HTOP1_MACRO_RESET                 0x0044 /* Macro reset */
+#define HDMI_HTOP1_PLL_CALIBRATION             0x0048 /* PLL calibration */
+#define HDMI_HTOP1_RE_CALIBRATION              0x004C /* Re-calibration */
+#define HDMI_HTOP1_CURRENT                     0x0050 /* Current */
+#define HDMI_HTOP1_PLL_LOCK_DETECT             0x0054 /* PLL lock detect */
+#define HDMI_HTOP1_PHY_TEST_MODE               0x0058 /* PHY Test Mode */
+#define HDMI_HTOP1_CLK_SET                     0x0080 /* Clock Set */
+#define HDMI_HTOP1_DDC_FAIL_SAFE               0x0084 /* DDC fail safe */
+#define HDMI_HTOP1_PRBS                                0x0088 /* PRBS */
+#define HDMI_HTOP1_EDID_AINC_CONTROL           0x008C /* EDID ainc Control */
+#define HDMI_HTOP1_HTOP_DCL_MODE               0x00FC /* Deep Color Mode */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0          0x0100 /* Deep Color:FRC COEF0 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1          0x0104 /* Deep Color:FRC COEF1 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2          0x0108 /* Deep Color:FRC COEF2 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3          0x010C /* Deep Color:FRC COEF3 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C                0x0110 /* Deep Color:FRC COEF0C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C                0x0114 /* Deep Color:FRC COEF1C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C                0x0118 /* Deep Color:FRC COEF2C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C                0x011C /* Deep Color:FRC COEF3C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_MODE           0x0120 /* Deep Color:FRC Mode */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START1                0x0124 /* Deep Color:Rect Start1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1         0x0128 /* Deep Color:Rect Size1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START2                0x012C /* Deep Color:Rect Start2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2         0x0130 /* Deep Color:Rect Size2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START3                0x0134 /* Deep Color:Rect Start3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3         0x0138 /* Deep Color:Rect Size3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START4                0x013C /* Deep Color:Rect Start4 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4         0x0140 /* Deep Color:Rect Size4 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1      0x0144 /* Deep Color:Fil Para Y1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2      0x0148 /* Deep Color:Fil Para Y1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1     0x014C /* Deep Color:Fil Para CB1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2     0x0150 /* Deep Color:Fil Para CB1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1     0x0154 /* Deep Color:Fil Para CR1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2     0x0158 /* Deep Color:Fil Para CR1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1      0x015C /* Deep Color:Fil Para Y2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2      0x0160 /* Deep Color:Fil Para Y2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1     0x0164 /* Deep Color:Fil Para CB2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2     0x0168 /* Deep Color:Fil Para CB2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1     0x016C /* Deep Color:Fil Para CR2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2     0x0170 /* Deep Color:Fil Para CR2_2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1                0x0174 /* Deep Color:Cor Para Y1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1       0x0178 /* Deep Color:Cor Para CB1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1       0x017C /* Deep Color:Cor Para CR1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2                0x0180 /* Deep Color:Cor Para Y2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2       0x0184 /* Deep Color:Cor Para CB2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2       0x0188 /* Deep Color:Cor Para CR2 */
+#define HDMI_HTOP1_EDID_DATA_READ              0x0200 /* EDID Data Read 128Byte:0x03FC */
+
 enum hotplug_state {
        HDMI_HOTPLUG_DISCONNECTED,
        HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
        struct sh_mobile_lcdc_entity entity;
 
        void __iomem *base;
+       void __iomem *htop1;
        enum hotplug_state hp_state;    /* hot-plug status */
        u8 preprogrammed_vic;           /* use a pre-programmed VIC or
                                           the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
        struct delayed_work edid_work;
        struct fb_videomode mode;
        struct fb_monspecs monspec;
+
+       /* register access functions */
+       void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
+       u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
 };
 
 #define entity_to_sh_hdmi(e)   container_of(e, struct sh_hdmi, entity)
 
-static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
 {
        iowrite8(data, hdmi->base + reg);
 }
 
-static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
 {
        return ioread8(hdmi->base + reg);
 }
 
+static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+       iowrite32((u32)data, hdmi->base + (reg * 4));
+       udelay(100);
+}
+
+static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
+{
+       return (u8)ioread32(hdmi->base + (reg * 4));
+}
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+       hdmi->write(hdmi, data, reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+       return hdmi->read(hdmi, reg);
+}
+
+static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
+{
+       u8 val = hdmi_read(hdmi, reg);
+
+       val &= ~mask;
+       val |= (data & mask);
+
+       hdmi_write(hdmi, val, reg);
+}
+
+static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
+{
+       iowrite32(data, hdmi->htop1 + reg);
+       udelay(100);
+}
+
+static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
+{
+       return ioread32(hdmi->htop1 + reg);
+}
+
 /*
  *     HDMI sound
  */
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
        msleep(10);
 
        /* PS mode b->d, reset PLLA and PLLB */
-       hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
 
        udelay(10);
 
-       hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
 }
 
 static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
        /* Read EDID */
        dev_dbg(hdmi->dev, "Read back EDID code:");
        for (i = 0; i < 128; i++) {
-               edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
+               edid[i] = (hdmi->htop1) ?
+                       (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
+                       hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
 #ifdef DEBUG
                if ((i % 16) == 0) {
                        printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
        u8 status1, status2, mask1, mask2;
 
        /* mode_b and PLLA and PLLB reset */
-       hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
 
        /* How long shall reset be held? */
        udelay(10);
 
        /* mode_b and PLLA and PLLB reset release */
-       hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
 
        status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
        status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
         */
        if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
                /* PS mode d->e. All functions are active */
-               hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+               hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
                dev_dbg(hdmi->dev, "HDMI running\n");
        }
 
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
 
        dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
        /* PS mode e->a */
-       hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
 }
 
 static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
        dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
 }
 
+static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
+{
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
+       hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
+       hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
+       hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
+       hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+       hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+       hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
+       msleep(100);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
+       msleep(100);
+       hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
+       hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
+}
+
 static int __init sh_hdmi_probe(struct platform_device *pdev)
 {
        struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       struct resource *htop1_res;
        int irq = platform_get_irq(pdev, 0), ret;
        struct sh_hdmi *hdmi;
        long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
        if (!res || !pdata || irq < 0)
                return -ENODEV;
 
+       htop1_res = NULL;
+       if (pdata->flags & HDMI_HAS_HTOP1) {
+               htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               if (!htop1_res) {
+                       dev_err(&pdev->dev, "htop1 needs register base\n");
+                       return -EINVAL;
+               }
+       }
+
        hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi) {
                dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
                goto egetclk;
        }
 
+       /* select register access functions */
+       if (pdata->flags & HDMI_32BIT_REG) {
+               hdmi->write     = __hdmi_write32;
+               hdmi->read      = __hdmi_read32;
+       } else {
+               hdmi->write     = __hdmi_write8;
+               hdmi->read      = __hdmi_read8;
+       }
+
        /* An arbitrary relaxed pixclock just to get things started: from standard 480p */
        rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
        if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
+       /* init interrupt polarity */
+       if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
+               hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
+
+       if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
+               hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
+
+       /* enable htop1 register if needed */
+       if (htop1_res) {
+               hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
+               if (!hdmi->htop1) {
+                       dev_err(&pdev->dev, "control register region already claimed\n");
+                       ret = -ENOMEM;
+                       goto emap_htop1;
+               }
+               sh_hdmi_htop1_init(hdmi);
+       }
+
        /* Product and revision IDs are 0 in sh-mobile version */
        dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
                 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
 ecodec:
        free_irq(irq, hdmi);
 ereqirq:
+       if (hdmi->htop1)
+               iounmap(hdmi->htop1);
+emap_htop1:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        clk_disable(hdmi->hdmi_clk);
        clk_put(hdmi->hdmi_clk);
+       if (hdmi->htop1)
+               iounmap(hdmi->htop1);
        iounmap(hdmi->base);
        release_mem_region(res->start, resource_size(res));
        kfree(hdmi);
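
The sh_mobile_hdmi changes above let one driver serve two register layouts: a platform flag (HDMI_32BIT_REG) selects either 8-bit accessors or 32-bit accessors whose registers sit four bytes apart, and existing hdmi_write()/hdmi_read() callers go through function pointers unchanged. Reduced to the indirection itself, with invented names:

#include <linux/io.h>
#include <linux/types.h>

struct demo_hdmi {
	void __iomem *base;
	void (*write)(struct demo_hdmi *d, u8 data, u8 reg);
};

static void demo_write8(struct demo_hdmi *d, u8 data, u8 reg)
{
	iowrite8(data, d->base + reg);
}

static void demo_write32(struct demo_hdmi *d, u8 data, u8 reg)
{
	/* same register index, but 32 bits wide and spaced 4 bytes apart */
	iowrite32((u32)data, d->base + (reg * 4));
}

static void demo_select_accessors(struct demo_hdmi *d, bool wide)
{
	d->write = wide ? demo_write32 : demo_write8;
}
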
index aff73842d877b3b98c287b50dc1cbdf138ed6cc7..85d6738b6c64ad2755629f6d71596da9a0f0bfd8 100644 (file)
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[]    = {0x68, 0x69, 0x00, 0x6b};
 static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
 static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
 
-static const unsigned short SiS_DRAMType[17][5]={
-       {0x0C,0x0A,0x02,0x40,0x39},
-       {0x0D,0x0A,0x01,0x40,0x48},
-       {0x0C,0x09,0x02,0x20,0x35},
-       {0x0D,0x09,0x01,0x20,0x44},
-       {0x0C,0x08,0x02,0x10,0x31},
-       {0x0D,0x08,0x01,0x10,0x40},
-       {0x0C,0x0A,0x01,0x20,0x34},
-       {0x0C,0x09,0x01,0x08,0x32},
-       {0x0B,0x08,0x02,0x08,0x21},
-       {0x0C,0x08,0x01,0x08,0x30},
-       {0x0A,0x08,0x02,0x04,0x11},
-       {0x0B,0x0A,0x01,0x10,0x28},
-       {0x09,0x08,0x02,0x02,0x01},
-       {0x0B,0x09,0x01,0x08,0x24},
-       {0x0B,0x08,0x01,0x04,0x20},
-       {0x0A,0x08,0x01,0x02,0x10},
-       {0x09,0x08,0x01,0x01,0x00}
-};
-
-static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
-{
-       { 2,12, 9,64,0x35},
-       { 1,13, 9,64,0x44},
-       { 2,12, 8,32,0x31},
-       { 2,11, 9,32,0x25},
-       { 1,12, 9,32,0x34},
-       { 1,13, 8,32,0x40},
-       { 2,11, 8,16,0x21},
-       { 1,12, 8,16,0x30},
-       { 1,11, 9,16,0x24},
-       { 1,11, 8, 8,0x20},
-       { 2, 9, 8, 4,0x01},
-       { 1,10, 8, 4,0x10},
-       { 1, 9, 8, 2,0x00}
-};
-
-static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
-{
-       { 2,12, 9,64,0x35},
-       { 2,12, 8,32,0x31},
-       { 2,11, 8,16,0x21},
-       { 2, 9, 8, 4,0x01}
-};
-
 static const unsigned char SiS_MDA_DAC[] =
 {
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
index 078ca2167d6f9754a3fc9ad26582643b82e34c8a..a7a48db64ce20d0cdad34635da4848bc0410dec1 100644 (file)
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
        return 1;                       /* 32bit */
 }
 
+static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+       {0x0C,0x0A,0x02,0x40,0x39},
+       {0x0D,0x0A,0x01,0x40,0x48},
+       {0x0C,0x09,0x02,0x20,0x35},
+       {0x0D,0x09,0x01,0x20,0x44},
+       {0x0C,0x08,0x02,0x10,0x31},
+       {0x0D,0x08,0x01,0x10,0x40},
+       {0x0C,0x0A,0x01,0x20,0x34},
+       {0x0C,0x09,0x01,0x08,0x32},
+       {0x0B,0x08,0x02,0x08,0x21},
+       {0x0C,0x08,0x01,0x08,0x30},
+       {0x0A,0x08,0x02,0x04,0x11},
+       {0x0B,0x0A,0x01,0x10,0x28},
+       {0x09,0x08,0x02,0x02,0x01},
+       {0x0B,0x09,0x01,0x08,0x24},
+       {0x0B,0x08,0x01,0x04,0x20},
+       {0x0A,0x08,0x01,0x02,0x10},
+       {0x09,0x08,0x01,0x01,0x00}
+};
+
 static int __devinit
 sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
                        int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
        unsigned short sr14;
        unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
        unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
-       static const unsigned short SiS_DRAMType[17][5] = {
-               {0x0C,0x0A,0x02,0x40,0x39},
-               {0x0D,0x0A,0x01,0x40,0x48},
-               {0x0C,0x09,0x02,0x20,0x35},
-               {0x0D,0x09,0x01,0x20,0x44},
-               {0x0C,0x08,0x02,0x10,0x31},
-               {0x0D,0x08,0x01,0x10,0x40},
-               {0x0C,0x0A,0x01,0x20,0x34},
-               {0x0C,0x09,0x01,0x08,0x32},
-               {0x0B,0x08,0x02,0x08,0x21},
-               {0x0C,0x08,0x01,0x08,0x30},
-               {0x0A,0x08,0x02,0x04,0x11},
-               {0x0B,0x0A,0x01,0x10,0x28},
-               {0x09,0x08,0x02,0x02,0x01},
-               {0x0B,0x09,0x01,0x08,0x24},
-               {0x0B,0x08,0x01,0x04,0x20},
-               {0x0A,0x08,0x01,0x02,0x10},
-               {0x09,0x08,0x01,0x01,0x00}
-       };
 
-        for(k = 0; k <= 16; k++) {
+        for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
 
                RankCapacity = buswidth * SiS_DRAMType[k][3];
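
The sisfb hunk above hoists the 17-entry SiS_DRAMType table out of the function (marked __devinitconst so it is discarded with the init code) and replaces the hard-coded "k <= 16" bound with ARRAY_SIZE(), so the loop limit always follows the table definition. A stand-alone illustration of that idiom with a made-up table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const unsigned short demo_tab[][5] = {
	{0x0C, 0x0A, 0x02, 0x40, 0x39},
	{0x09, 0x08, 0x01, 0x01, 0x00},
};

static unsigned int demo_sum_capacity(unsigned int buswidth)
{
	unsigned int k, sum = 0;

	/* bound derived from the table itself, no magic row count to keep in sync */
	for (k = 0; k < ARRAY_SIZE(demo_tab); k++)
		sum += buswidth * demo_tab[k][3];

	return sum;
}
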
 
index 30f7a815a62bc0f36813a644ad1819d210958ca5..5b6abc6de84ba0a823a80806f0d84c6675f0b5c0 100644 (file)
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
      */
 
 module_init(xxxfb_init);
-module_exit(xxxfb_remove);
+module_exit(xxxfb_exit);
 
 MODULE_LICENSE("GPL");
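
The one-line skeletonfb fix above matters because module_init()/module_exit() must name the module loader entry points themselves: the exit hook had been pointing at the remove() callback instead of the function that unregisters the driver. In sketch form, with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver = {
	.driver	= {
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);	/* the exit hook, not the .remove() callback */
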
index ccbfef5e828f3d19815dc7f91c27164c737f4250..af3ef27ad36ccd4f6db2e8983351c030116e748a 100644 (file)
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
        }
 }
 
-int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
        int width, int height)
 {
        size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
 
                struct fb_deferred_io *fbdefio;
 
-               fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+               fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
                if (fbdefio) {
                        fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
index 7af1e81661828669895d85b2efa6343ed047334e..8af64148294b88074a269f54691d05a9cb00a547 100644 (file)
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
 
                struct fb_deferred_io *fbdefio;
 
-               fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+               fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;
index 0c8837565bc719cb745022b6918ba3b1e9d562a4..c80e770e180029a132087f73f3cbbfc6295a7c5e 100644 (file)
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
 static ssize_t viafb_dfph_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
 {
-       char buf[20];
-       u8 reg_val = 0;
-       unsigned long length;
-       if (count < 1)
-               return -EINVAL;
-       length = count > 20 ? 20 : count;
-       if (copy_from_user(&buf[0], buffer, length))
-               return -EFAULT;
-       buf[length - 1] = '\0'; /*Ensure end string */
-       if (kstrtou8(buf, 0, &reg_val) < 0)
-               return -EINVAL;
+       int err;
+       u8 reg_val;
+       err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+       if (err)
+               return err;
+
        viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
        return count;
 }
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
 static ssize_t viafb_dfpl_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
 {
-       char buf[20];
-       u8 reg_val = 0;
-       unsigned long length;
-       if (count < 1)
-               return -EINVAL;
-       length = count > 20 ? 20 : count;
-       if (copy_from_user(&buf[0], buffer, length))
-               return -EFAULT;
-       buf[length - 1] = '\0'; /*Ensure end string */
-       if (kstrtou8(buf, 0, &reg_val) < 0)
-               return -EINVAL;
+       int err;
+       u8 reg_val;
+       err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+       if (err)
+               return err;
+
        viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
        return count;
 }
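
Both viafb proc write handlers above collapse the copy_from_user()/NUL-terminate/kstrtou8() sequence into a single kstrtou8_from_user() call, which also propagates the parser's own error code instead of a blanket -EINVAL. A minimal sketch of the pattern with a hypothetical handler; the register write itself is elided:

#include <linux/kernel.h>	/* kstrtou8_from_user() */
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_proc_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *pos)
{
	u8 reg_val;
	int err;

	/* copies the token from user space, NUL-terminates and parses it
	 * (base 0 auto-detects 0x... hex versus decimal) */
	err = kstrtou8_from_user(buffer, count, 0, &reg_val);
	if (err)
		return err;

	/* ... apply reg_val to the hardware register here ... */
	return count;
}
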
index bfbc15ca38ddd3a0ac57cf0860624558a35aee47..0908e604433303d605b9a7cd6ab4ffe36087f96f 100644 (file)
@@ -47,7 +47,7 @@ struct virtio_balloon
        struct task_struct *thread;
 
        /* Waiting for host to ack the pages we released. */
-       struct completion acked;
+       wait_queue_head_t acked;
 
        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
 
 static void balloon_ack(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (vb)
-               complete(&vb->acked);
+       wake_up(&vb->acked);
 }
 
 static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 {
        struct scatterlist sg;
+       unsigned int len;
 
        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
 
-       init_completion(&vb->acked);
-
        /* We should always be able to add one buffer to an empty queue. */
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);
 
        /* When host has read buffer, this completes via balloon_ack */
-       wait_for_completion(&vb->acked);
+       wait_event(vb->acked, virtqueue_get_buf(vq, &len));
 }
 
 static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
  */
 static void stats_request(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (!vb)
-               return;
        vb->need_stats_update = 1;
        wake_up(&vb->config_change);
 }
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
        struct virtqueue *vq;
        struct scatterlist sg;
+       unsigned int len;
 
        vb->need_stats_update = 0;
        update_balloon_stats(vb);
 
        vq = vb->stats_vq;
+       if (!virtqueue_get_buf(vq, &len))
+               return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
        INIT_LIST_HEAD(&vb->pages);
        vb->num_pages = 0;
        init_waitqueue_head(&vb->config_change);
+       init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;
        vb->need_stats_update = 0;
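
The virtio_balloon hunks above replace the struct completion with a wait queue: the ack and stats callbacks now only wake the waiter, while virtqueue_get_buf() is called from the waiting or servicing side itself. The underlying waitqueue pattern, reduced to a self-contained sketch with invented names:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_acked);
static int demo_buf_ready;

/* runs in the "host acked" callback path */
static void demo_ack(void)
{
	demo_buf_ready = 1;	/* make the condition true ... */
	wake_up(&demo_acked);	/* ... then wake the sleeper */
}

/* runs in the thread that handed the buffer to the host */
static void demo_wait_for_ack(void)
{
	/* sleeps until the condition holds; it is re-evaluated on every
	 * wake-up, so stray wake-ups are harmless */
	wait_event(demo_acked, demo_buf_ready);
}
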
 
index d92d7488be16677a6436e7224ebaaaf1c087fe57..fe819b76de5685f2cf28a2db9000a117ca8338ce 100644 (file)
@@ -64,6 +64,18 @@ config SOFT_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called softdog.
 
+config DA9052_WATCHDOG
+        tristate "Dialog DA9052 Watchdog"
+        depends on PMIC_DA9052
+        select WATCHDOG_CORE
+        help
+          Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+          causes a system reset.
+
+          Say Y here to include support for the DA9052 watchdog.
+          Alternatively say M to compile the driver as a module,
+          which will be called da9052_wdt.
+
 config WM831X_WATCHDOG
        tristate "WM831x watchdog"
        depends on MFD_WM831X
@@ -87,6 +99,7 @@ config WM8350_WATCHDOG
 config ARM_SP805_WATCHDOG
        tristate "ARM SP805 Watchdog"
        depends on ARM_AMBA
+       select WATCHDOG_CORE
        help
          ARM Primecell SP805 Watchdog timer. This will reboot your system when
          the timeout is reached.
index 442bfbe0882a29206035d17c0faa5ca629c7af45..572b39bed06a256ff6c79df0b943acbda7e88174 100644 (file)
@@ -163,6 +163,7 @@ obj-$(CONFIG_WATCHDOG_CP1XXX)               += cpwd.o
 obj-$(CONFIG_XEN_WDT) += xen_wdt.o
 
 # Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
 obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
 obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644 (file)
index 0000000..3f75129
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT     4
+#define DA9052_TWDMIN          256
+
+struct da9052_wdt_data {
+       struct watchdog_device wdt;
+       struct da9052 *da9052;
+       struct kref kref;
+       unsigned long jpast;
+};
+
+static const struct {
+       u8 reg_val;
+       int time;  /* Seconds */
+} da9052_wdt_maps[] = {
+       { 1, 2 },
+       { 2, 4 },
+       { 3, 8 },
+       { 4, 16 },
+       { 5, 32 },
+       { 5, 33 },  /* Actual time is 32.768s, so include both 32s and 33s */
+       { 6, 65 },
+       { 6, 66 },  /* Actual time is 65.536s, so include both 65s and 66s */
+       { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+       struct da9052_wdt_data *driver_data =
+               container_of(r, struct da9052_wdt_data, kref);
+
+       kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+                                 unsigned int timeout)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+       struct da9052 *da9052 = driver_data->da9052;
+       int ret, i;
+
+       /*
+        * Disable the Watchdog timer before setting
+        * new time out.
+        */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_TWDSCALE, 0);
+       if (ret < 0) {
+               dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+                       ret);
+               return ret;
+       }
+       if (timeout) {
+               /*
+                * To change the timeout, da9052 needs to
+                * be disabled for at least 150 us.
+                */
+               udelay(150);
+
+               /* Set the desired timeout */
+               for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+                       if (da9052_wdt_maps[i].time == timeout)
+                               break;
+
+               if (i == ARRAY_SIZE(da9052_wdt_maps))
+                       ret = -EINVAL;
+               else
+                       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                                               DA9052_CONTROLD_TWDSCALE,
+                                               da9052_wdt_maps[i].reg_val);
+               if (ret < 0) {
+                       dev_err(da9052->dev,
+                               "Failed to update timescale bit, %d\n", ret);
+                       return ret;
+               }
+
+               wdt_dev->timeout = timeout;
+               driver_data->jpast = jiffies;
+       }
+
+       return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+       kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+       kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+       return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+       return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+       struct da9052 *da9052 = driver_data->da9052;
+       unsigned long msec, jnow = jiffies;
+       int ret;
+
+       /*
+        * The watchdog window has a minimum period, TWDMIN. A write issued
+        * before that time has elapsed would cause an error, so wait it out.
+        */
+       msec = (jnow - driver_data->jpast) * 1000/HZ;
+       if (msec < DA9052_TWDMIN)
+               mdelay(msec);
+
+       /* Reset the watchdog timer */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_WATCHDOG, 1 << 7);
+       if (ret < 0)
+               goto err_strobe;
+
+       /*
+        * FIXME: Reset the watchdog core, in general PMIC
+        * is supposed to do this
+        */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+       return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+       .options        = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity       = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = da9052_wdt_start,
+       .stop = da9052_wdt_stop,
+       .ping = da9052_wdt_ping,
+       .set_timeout = da9052_wdt_set_timeout,
+       .ref = da9052_wdt_ref,
+       .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+       struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+       struct da9052_wdt_data *driver_data;
+       struct watchdog_device *da9052_wdt;
+       int ret;
+
+       driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+                                  GFP_KERNEL);
+       if (!driver_data) {
+               dev_err(da9052->dev, "Unable to allocate watchdog device\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       driver_data->da9052 = da9052;
+
+       da9052_wdt = &driver_data->wdt;
+
+       da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+       da9052_wdt->info = &da9052_wdt_info;
+       da9052_wdt->ops = &da9052_wdt_ops;
+       watchdog_set_drvdata(da9052_wdt, driver_data);
+
+       kref_init(&driver_data->kref);
+
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_TWDSCALE, 0);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+                       ret);
+               goto err;
+       }
+
+       ret = watchdog_register_device(&driver_data->wdt);
+       if (ret != 0) {
+               dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+                       ret);
+               goto err;
+       }
+
+       dev_set_drvdata(&pdev->dev, driver_data);
+err:
+       return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+       struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+       watchdog_unregister_device(&driver_data->wdt);
+       kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+       return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+       .probe = da9052_wdt_probe,
+       .remove = __devexit_p(da9052_wdt_remove),
+       .driver = {
+               .name   = "da9052-watchdog",
+       },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
index 2b763815aeecc90ebad7fdf36f17de949d365bfd..1eff743ec4970071f8451704fb51d4b06de70950 100644 (file)
@@ -146,7 +146,7 @@ struct cmn_registers {
 }  __attribute__((packed));
 
 static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
 static unsigned int is_icru;
 static DEFINE_SPINLOCK(rom_lock);
 static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
 static void hpwdt_exit_nmi_decoding(void)
 {
        unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+       unregister_nmi_handler(NMI_SERR, "hpwdt");
+       unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
        if (cru_rom_addr)
                iounmap(cru_rom_addr);
 }
index 741528b032e2482aa310b729bb8bf825b8a65b18..9c2c27c3b4240b5c2ecc71aa82ab5b40704240b6 100644 (file)
@@ -575,7 +575,7 @@ static int __devinit iTCO_wdt_probe(struct platform_device *dev)
        if (!request_region(iTCO_wdt_private.smi_res->start,
                        resource_size(iTCO_wdt_private.smi_res), dev->name)) {
                pr_err("I/O address 0x%04llx already in use, device disabled\n",
-                      SMI_EN);
+                      (u64)SMI_EN);
                ret = -EBUSY;
                goto unmap_gcs;
        }
@@ -592,13 +592,13 @@ static int __devinit iTCO_wdt_probe(struct platform_device *dev)
        if (!request_region(iTCO_wdt_private.tco_res->start,
                        resource_size(iTCO_wdt_private.tco_res), dev->name)) {
                pr_err("I/O address 0x%04llx already in use, device disabled\n",
-                      TCOBASE);
+                      (u64)TCOBASE);
                ret = -EBUSY;
                goto unreg_smi;
        }
 
        pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
-               ich_info->name, ich_info->iTCO_version, TCOBASE);
+               ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
 
        /* Clear out the (probably old) status */
        outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -699,3 +699,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
index bbb170e50055d43e55ff783f4aa645ec45ae1d85..e4841c36798bd758d9a037f8d89de467047d88ae 100644 (file)
@@ -4,7 +4,7 @@
  * Watchdog driver for ARM SP805 watchdog module
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2 or later. This program is licensed "as is" without any
 #include <linux/amba/bus.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/math64.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 #include <linux/watchdog.h>
 
 /* default timeout in seconds */
@@ -56,6 +53,7 @@
 
 /**
  * struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
  * @lock: spin lock protecting dev structure and io access
  * @base: base address of wdt
  * @clk: clock structure of wdt
  * @timeout: current programmed timeout
  */
 struct sp805_wdt {
+       struct watchdog_device          wdd;
        spinlock_t                      lock;
        void __iomem                    *base;
        struct clk                      *clk;
        struct amba_device              *adev;
-       unsigned long                   status;
-       #define WDT_BUSY                0
-       #define WDT_CAN_BE_CLOSED       1
        unsigned int                    load_val;
        unsigned int                    timeout;
 };
 
-/* local variables */
-static struct sp805_wdt *wdt;
 static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+               "Set to 1 to keep watchdog running after device release");
 
 /* This routine finds load value that will reset system in required timout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
 {
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
        u64 load, rate;
 
        rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@ static void wdt_setload(unsigned int timeout)
        /* roundup timeout to closest positive integer value */
        wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
        spin_unlock(&wdt->lock);
+
+       return 0;
 }
 
 /* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
 {
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
        u64 load, rate;
 
        rate = clk_get_rate(wdt->clk);
@@ -123,166 +124,96 @@ static u32 wdt_timeleft(void)
        return div_u64(load, rate);
 }
 
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
 {
-       spin_lock(&wdt->lock);
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+       int ret;
 
-       writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
-       writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
-       writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
-       writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
-       writel_relaxed(LOCK, wdt->base + WDTLOCK);
+       if (!ping) {
+               ret = clk_prepare(wdt->clk);
+               if (ret) {
+                       dev_err(&wdt->adev->dev, "clock prepare fail");
+                       return ret;
+               }
 
-       /* Flush posted writes. */
-       readl_relaxed(wdt->base + WDTLOCK);
-       spin_unlock(&wdt->lock);
-}
+               ret = clk_enable(wdt->clk);
+               if (ret) {
+                       dev_err(&wdt->adev->dev, "clock enable fail");
+                       clk_unprepare(wdt->clk);
+                       return ret;
+               }
+       }
 
-/* disables watchdog timers reset */
-static void wdt_disable(void)
-{
        spin_lock(&wdt->lock);
 
        writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
-       writel_relaxed(0, wdt->base + WDTCONTROL);
+       writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+
+       if (!ping) {
+               writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+               writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+                               WDTCONTROL);
+       }
+
        writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
        /* Flush posted writes. */
        readl_relaxed(wdt->base + WDTLOCK);
        spin_unlock(&wdt->lock);
+
+       return 0;
 }
 
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
-               size_t len, loff_t *ppos)
+static int wdt_ping(struct watchdog_device *wdd)
 {
-       if (len) {
-               if (!nowayout) {
-                       size_t i;
-
-                       clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
-                       for (i = 0; i != len; i++) {
-                               char c;
-
-                               if (get_user(c, data + i))
-                                       return -EFAULT;
-                               /* Check for Magic Close character */
-                               if (c == 'V') {
-                                       set_bit(WDT_CAN_BE_CLOSED,
-                                                       &wdt->status);
-                                       break;
-                               }
-                       }
-               }
-               wdt_enable();
-       }
-       return len;
+       return wdt_config(wdd, true);
 }
 
-static const struct watchdog_info ident = {
-       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-       .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
-               unsigned long arg)
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
 {
-       int ret = -ENOTTY;
-       unsigned int timeout;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ret = copy_to_user((struct watchdog_info *)arg, &ident,
-                               sizeof(ident)) ? -EFAULT : 0;
-               break;
-
-       case WDIOC_GETSTATUS:
-               ret = put_user(0, (int *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               wdt_enable();
-               ret = 0;
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               ret = get_user(timeout, (unsigned int *)arg);
-               if (ret)
-                       break;
-
-               wdt_setload(timeout);
-
-               wdt_enable();
-               /* Fall through */
-
-       case WDIOC_GETTIMEOUT:
-               ret = put_user(wdt->timeout, (unsigned int *)arg);
-               break;
-       case WDIOC_GETTIMELEFT:
-               ret = put_user(wdt_timeleft(), (unsigned int *)arg);
-               break;
-       }
-       return ret;
+       return wdt_config(wdd, false);
 }
 
-static int sp805_wdt_open(struct inode *inode, struct file *file)
+/* disables watchdog timers reset */
+static int wdt_disable(struct watchdog_device *wdd)
 {
-       int ret = 0;
-
-       if (test_and_set_bit(WDT_BUSY, &wdt->status))
-               return -EBUSY;
-
-       ret = clk_enable(wdt->clk);
-       if (ret) {
-               dev_err(&wdt->adev->dev, "clock enable fail");
-               goto err;
-       }
-
-       wdt_enable();
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
 
-       /* can not be closed, once enabled */
-       clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-       return nonseekable_open(inode, file);
+       spin_lock(&wdt->lock);
 
-err:
-       clear_bit(WDT_BUSY, &wdt->status);
-       return ret;
-}
+       writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
+       writel_relaxed(0, wdt->base + WDTCONTROL);
+       writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
-       if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
-               clear_bit(WDT_BUSY, &wdt->status);
-               dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
-               return 0;
-       }
+       /* Flush posted writes. */
+       readl_relaxed(wdt->base + WDTLOCK);
+       spin_unlock(&wdt->lock);
 
-       wdt_disable();
        clk_disable(wdt->clk);
-       clear_bit(WDT_BUSY, &wdt->status);
+       clk_unprepare(wdt->clk);
 
        return 0;
 }
 
-static const struct file_operations sp805_wdt_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .write = sp805_wdt_write,
-       .unlocked_ioctl = sp805_wdt_ioctl,
-       .open = sp805_wdt_open,
-       .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = MODULE_NAME,
 };
 
-static struct miscdevice sp805_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = wdt_enable,
+       .stop           = wdt_disable,
+       .ping           = wdt_ping,
+       .set_timeout    = wdt_setload,
+       .get_timeleft   = wdt_timeleft,
 };
 
 static int __devinit
 sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
 {
+       struct sp805_wdt *wdt;
        int ret = 0;
 
        if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
        }
 
        wdt->adev = adev;
+       wdt->wdd.info = &wdt_info;
+       wdt->wdd.ops = &wdt_ops;
+
        spin_lock_init(&wdt->lock);
-       wdt_setload(DEFAULT_TIMEOUT);
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
+       watchdog_set_drvdata(&wdt->wdd, wdt);
+       wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
 
-       ret = misc_register(&sp805_wdt_miscdev);
-       if (ret < 0) {
-               dev_warn(&adev->dev, "cannot register misc device\n");
-               goto err_misc_register;
+       ret = watchdog_register_device(&wdt->wdd);
+       if (ret) {
+               dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+                               ret);
+               goto err_register;
        }
+       amba_set_drvdata(adev, wdt);
 
        dev_info(&adev->dev, "registration successful\n");
        return 0;
 
-err_misc_register:
+err_register:
        clk_put(wdt->clk);
 err:
        dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@ err:
 
 static int __devexit sp805_wdt_remove(struct amba_device *adev)
 {
-       misc_deregister(&sp805_wdt_miscdev);
+       struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+       watchdog_unregister_device(&wdt->wdd);
+       amba_set_drvdata(adev, NULL);
+       watchdog_set_drvdata(&wdt->wdd, NULL);
        clk_put(wdt->clk);
 
        return 0;
@@ -345,28 +287,22 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
 #ifdef CONFIG_PM
 static int sp805_wdt_suspend(struct device *dev)
 {
-       if (test_bit(WDT_BUSY, &wdt->status)) {
-               wdt_disable();
-               clk_disable(wdt->clk);
-       }
+       struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+       if (watchdog_active(&wdt->wdd))
+               return wdt_disable(&wdt->wdd);
 
        return 0;
 }
 
 static int sp805_wdt_resume(struct device *dev)
 {
-       int ret = 0;
+       struct sp805_wdt *wdt = dev_get_drvdata(dev);
 
-       if (test_bit(WDT_BUSY, &wdt->status)) {
-               ret = clk_enable(wdt->clk);
-               if (ret) {
-                       dev_err(dev, "clock enable fail");
-                       return ret;
-               }
-               wdt_enable();
-       }
+       if (watchdog_active(&wdt->wdd))
+               return wdt_enable(&wdt->wdd);
 
-       return ret;
+       return 0;
 }
 #endif /* CONFIG_PM */
 
@@ -395,11 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
 
 module_amba_driver(sp805_wdt_driver);
 
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
-               "Set to 1 to keep watchdog running after device release");
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
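
The sp805 changes above are a template for converting a driver from a private miscdevice to the watchdog framework: drop the file_operations and ioctl plumbing, describe the hardware through watchdog_info/watchdog_ops, and let the core create the device node. A condensed, hedged skeleton of that pattern (all example_* names are invented; only the watchdog_* calls come from the framework touched by this merge):

	#include <linux/module.h>
	#include <linux/watchdog.h>

	static int example_wdt_start(struct watchdog_device *wdd) { return 0; }
	static int example_wdt_stop(struct watchdog_device *wdd)  { return 0; }
	static int example_wdt_ping(struct watchdog_device *wdd)  { return 0; }

	static const struct watchdog_info example_wdt_info = {
		.options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
		.identity = "example watchdog",
	};

	static const struct watchdog_ops example_wdt_ops = {
		.owner = THIS_MODULE,
		.start = example_wdt_start,
		.stop  = example_wdt_stop,
		.ping  = example_wdt_ping,
	};

	static struct watchdog_device example_wdd = {
		.info = &example_wdt_info,
		.ops  = &example_wdt_ops,
	};

	static int __init example_wdt_init(void)
	{
		watchdog_set_nowayout(&example_wdd, false);
		/* The core creates /dev/watchdogN (and the legacy node for id 0). */
		return watchdog_register_device(&example_wdd);
	}

	static void __exit example_wdt_exit(void)
	{
		watchdog_unregister_device(&example_wdd);
	}

	module_init(example_wdt_init);
	module_exit(example_wdt_exit);
	MODULE_LICENSE("GPL");
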
index 5603e31afdab03e8d67ad97151f772b879dd015e..aa50da3ccfe3678f8a740eab41abcb61e7beefef 100644 (file)
@@ -91,7 +91,7 @@ static inline void wdt_reset(void)
 static void wdt_timer_tick(unsigned long data)
 {
        if (time_before(jiffies, next_heartbeat) ||
-          (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+          (!watchdog_active(&wdt_dev))) {
                wdt_reset();
                mod_timer(&timer, jiffies + WDT_HEARTBEAT);
        } else
index 14d768bfa267d78923a467484ed1f66275471bb2..6aa46a90ff028691f97627765f593d864bc885a1 100644 (file)
 #include <linux/kernel.h>      /* For printk/panic/... */
 #include <linux/watchdog.h>    /* For watchdog specific items */
 #include <linux/init.h>                /* For __init/__exit/... */
+#include <linux/idr.h>         /* For ida_* macros */
+#include <linux/err.h>         /* For IS_ERR macros */
 
-#include "watchdog_dev.h"      /* For watchdog_dev_register/... */
+#include "watchdog_core.h"     /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
 
 /**
  * watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
  */
 int watchdog_register_device(struct watchdog_device *wdd)
 {
-       int ret;
+       int ret, id, devno;
 
        if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
                return -EINVAL;
@@ -74,10 +79,38 @@ int watchdog_register_device(struct watchdog_device *wdd)
         * corrupted in a later stage then we expect a kernel panic!
         */
 
-       /* We only support 1 watchdog device via the /dev/watchdog interface */
+       mutex_init(&wdd->lock);
+       id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+       if (id < 0)
+               return id;
+       wdd->id = id;
+
        ret = watchdog_dev_register(wdd);
        if (ret) {
-               pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+               ida_simple_remove(&watchdog_ida, id);
+               if (!(id == 0 && ret == -EBUSY))
+                       return ret;
+
+               /* Retry in case a legacy watchdog module exists */
+               id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+               if (id < 0)
+                       return id;
+               wdd->id = id;
+
+               ret = watchdog_dev_register(wdd);
+               if (ret) {
+                       ida_simple_remove(&watchdog_ida, id);
+                       return ret;
+               }
+       }
+
+       devno = wdd->cdev.dev;
+       wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+                                       NULL, "watchdog%d", wdd->id);
+       if (IS_ERR(wdd->dev)) {
+               watchdog_dev_unregister(wdd);
+               ida_simple_remove(&watchdog_ida, id);
+               ret = PTR_ERR(wdd->dev);
                return ret;
        }
 
@@ -95,6 +128,7 @@ EXPORT_SYMBOL_GPL(watchdog_register_device);
 void watchdog_unregister_device(struct watchdog_device *wdd)
 {
        int ret;
+       int devno = wdd->cdev.dev;
 
        if (wdd == NULL)
                return;
@@ -102,9 +136,41 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
        ret = watchdog_dev_unregister(wdd);
        if (ret)
                pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+       device_destroy(watchdog_class, devno);
+       ida_simple_remove(&watchdog_ida, wdd->id);
+       wdd->dev = NULL;
 }
 EXPORT_SYMBOL_GPL(watchdog_unregister_device);
 
+static int __init watchdog_init(void)
+{
+       int err;
+
+       watchdog_class = class_create(THIS_MODULE, "watchdog");
+       if (IS_ERR(watchdog_class)) {
+               pr_err("couldn't create class\n");
+               return PTR_ERR(watchdog_class);
+       }
+
+       err = watchdog_dev_init();
+       if (err < 0) {
+               class_destroy(watchdog_class);
+               return err;
+       }
+
+       return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+       watchdog_dev_exit();
+       class_destroy(watchdog_class);
+       ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
 MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
 MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
 MODULE_DESCRIPTION("WatchDog Timer Driver Core");
similarity index 79%
rename from drivers/watchdog/watchdog_dev.h
rename to drivers/watchdog/watchdog_core.h
index bc7612be25cefe6e3267621ad8e570e355250367..6c951418fca765deebf2348de2fb078436bd8517 100644 (file)
  *     This material is provided "AS-IS" and at no charge.
  */
 
+#define MAX_DOGS       32      /* Maximum number of watchdog devices */
+
 /*
  *     Functions/procedures to be called by the core
  */
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
index 8558da912c42fd76c5bde4ddd55528ed0be48615..ef8edecfc526cedaa27bb22b23a2729289221f1b 100644 (file)
 #include <linux/init.h>                /* For __init/__exit/... */
 #include <linux/uaccess.h>     /* For copy_to_user/put_user/... */
 
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
 /* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
 
 /*
  *     watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@ static struct watchdog_device *wdd;
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-               if (wddev->ops->ping)
-                       return wddev->ops->ping(wddev);  /* ping the watchdog */
-               else
-                       return wddev->ops->start(wddev); /* restart watchdog */
+       int err = 0;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_ping;
        }
-       return 0;
+
+       if (!watchdog_active(wddev))
+               goto out_ping;
+
+       if (wddev->ops->ping)
+               err = wddev->ops->ping(wddev);  /* ping the watchdog */
+       else
+               err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -79,16 +94,25 @@ static int watchdog_ping(struct watchdog_device *wddev)
 
 static int watchdog_start(struct watchdog_device *wddev)
 {
-       int err;
+       int err = 0;
 
-       if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
-               err = wddev->ops->start(wddev);
-               if (err < 0)
-                       return err;
+       mutex_lock(&wddev->lock);
 
-               set_bit(WDOG_ACTIVE, &wddev->status);
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_start;
        }
-       return 0;
+
+       if (watchdog_active(wddev))
+               goto out_start;
+
+       err = wddev->ops->start(wddev);
+       if (err == 0)
+               set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -103,22 +127,155 @@ static int watchdog_start(struct watchdog_device *wddev)
 
 static int watchdog_stop(struct watchdog_device *wddev)
 {
-       int err = -EBUSY;
+       int err = 0;
 
-       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
-               pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-                                                       wddev->info->identity);
-               return err;
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_stop;
        }
 
-       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-               err = wddev->ops->stop(wddev);
-               if (err < 0)
-                       return err;
+       if (!watchdog_active(wddev))
+               goto out_stop;
 
+       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+               dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+               err = -EBUSY;
+               goto out_stop;
+       }
+
+       err = wddev->ops->stop(wddev);
+       if (err == 0)
                clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_get_status: wrapper to get the watchdog status
+ *     @wddev: the watchdog device to get the status from
+ *     @status: the status of the watchdog device
+ *
+ *     Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+                                                       unsigned int *status)
+{
+       int err = 0;
+
+       *status = 0;
+       if (!wddev->ops->status)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_status;
        }
-       return 0;
+
+       *status = wddev->ops->status(wddev);
+
+out_status:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_set_timeout: set the watchdog timer timeout
+ *     @wddev: the watchdog device to set the timeout for
+ *     @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+                                                       unsigned int timeout)
+{
+       int err;
+
+       if ((wddev->ops->set_timeout == NULL) ||
+           !(wddev->info->options & WDIOF_SETTIMEOUT))
+               return -EOPNOTSUPP;
+
+       if ((wddev->max_timeout != 0) &&
+           (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+               return -EINVAL;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_timeout;
+       }
+
+       err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_get_timeleft: wrapper to get the time left before a reboot
+ *     @wddev: the watchdog device to get the remaining time from
+ *     @timeleft: the time that's left
+ *
+ *     Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+                                                       unsigned int *timeleft)
+{
+       int err = 0;
+
+       *timeleft = 0;
+       if (!wddev->ops->get_timeleft)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_timeleft;
+       }
+
+       *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
+ *     @wddev: the watchdog device to do the ioctl on
+ *     @cmd: watchdog command
+ *     @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+                                                       unsigned long arg)
+{
+       int err;
+
+       if (!wddev->ops->ioctl)
+               return -ENOIOCTLCMD;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_ioctl;
+       }
+
+       err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -136,6 +293,7 @@ static int watchdog_stop(struct watchdog_device *wddev)
 static ssize_t watchdog_write(struct file *file, const char __user *data,
                                                size_t len, loff_t *ppos)
 {
+       struct watchdog_device *wdd = file->private_data;
        size_t i;
        char c;
 
@@ -175,23 +333,24 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
                                                        unsigned long arg)
 {
+       struct watchdog_device *wdd = file->private_data;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        unsigned int val;
        int err;
 
-       if (wdd->ops->ioctl) {
-               err = wdd->ops->ioctl(wdd, cmd, arg);
-               if (err != -ENOIOCTLCMD)
-                       return err;
-       }
+       err = watchdog_ioctl_op(wdd, cmd, arg);
+       if (err != -ENOIOCTLCMD)
+               return err;
 
        switch (cmd) {
        case WDIOC_GETSUPPORT:
                return copy_to_user(argp, wdd->info,
                        sizeof(struct watchdog_info)) ? -EFAULT : 0;
        case WDIOC_GETSTATUS:
-               val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+               err = watchdog_get_status(wdd, &val);
+               if (err == -ENODEV)
+                       return err;
                return put_user(val, p);
        case WDIOC_GETBOOTSTATUS:
                return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                watchdog_ping(wdd);
                return 0;
        case WDIOC_SETTIMEOUT:
-               if ((wdd->ops->set_timeout == NULL) ||
-                   !(wdd->info->options & WDIOF_SETTIMEOUT))
-                       return -EOPNOTSUPP;
                if (get_user(val, p))
                        return -EFAULT;
-               if ((wdd->max_timeout != 0) &&
-                   (val < wdd->min_timeout || val > wdd->max_timeout))
-                               return -EINVAL;
-               err = wdd->ops->set_timeout(wdd, val);
+               err = watchdog_set_timeout(wdd, val);
                if (err < 0)
                        return err;
                /* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                        return -EOPNOTSUPP;
                return put_user(wdd->timeout, p);
        case WDIOC_GETTIMELEFT:
-               if (!wdd->ops->get_timeleft)
-                       return -EOPNOTSUPP;
-
-               return put_user(wdd->ops->get_timeleft(wdd), p);
+               err = watchdog_get_timeleft(wdd, &val);
+               if (err)
+                       return err;
+               return put_user(val, p);
        default:
                return -ENOTTY;
        }
 }
 
 /*
- *     watchdog_open: open the /dev/watchdog device.
+ *     watchdog_open: open the /dev/watchdog* devices.
  *     @inode: inode of device
  *     @file: file handle to device
  *
- *     When the /dev/watchdog device gets opened, we start the watchdog.
+ *     When the /dev/watchdog* device gets opened, we start the watchdog.
  *     Watch out: the /dev/watchdog device is single open, so we make sure
  *     it can only be opened once.
  */
@@ -259,6 +412,13 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 static int watchdog_open(struct inode *inode, struct file *file)
 {
        int err = -EBUSY;
+       struct watchdog_device *wdd;
+
+       /* Get the corresponding watchdog device */
+       if (imajor(inode) == MISC_MAJOR)
+               wdd = old_wdd;
+       else
+               wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
 
        /* the watchdog is single open! */
        if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
        if (err < 0)
                goto out_mod;
 
+       file->private_data = wdd;
+
+       if (wdd->ops->ref)
+               wdd->ops->ref(wdd);
+
        /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
        return nonseekable_open(inode, file);
 
@@ -286,9 +451,9 @@ out:
 }
 
 /*
- *      watchdog_release: release the /dev/watchdog device.
- *      @inode: inode of device
- *      @file: file handle to device
+ *     watchdog_release: release the watchdog device.
+ *     @inode: inode of device
+ *     @file: file handle to device
  *
  *     This is the code for when /dev/watchdog gets closed. We will only
  *     stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@ out:
 
 static int watchdog_release(struct inode *inode, struct file *file)
 {
+       struct watchdog_device *wdd = file->private_data;
        int err = -EBUSY;
 
        /*
@@ -310,7 +476,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
 
        /* If the watchdog was not stopped, send a keepalive ping */
        if (err < 0) {
-               pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+               mutex_lock(&wdd->lock);
+               if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+                       dev_crit(wdd->dev, "watchdog did not stop!\n");
+               mutex_unlock(&wdd->lock);
                watchdog_ping(wdd);
        }
 
@@ -320,6 +489,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
        /* make sure that /dev/watchdog can be re-opened */
        clear_bit(WDOG_DEV_OPEN, &wdd->status);
 
+       /* Note wdd may be gone after this, do not use after this! */
+       if (wdd->ops->unref)
+               wdd->ops->unref(wdd);
+
        return 0;
 }
 
@@ -338,62 +511,92 @@ static struct miscdevice watchdog_miscdev = {
 };
 
 /*
- *     watchdog_dev_register:
+ *     watchdog_dev_register: register a watchdog device
  *     @watchdog: watchdog device
  *
- *     Register a watchdog device as /dev/watchdog. /dev/watchdog
- *     is actually a miscdevice and thus we set it up like that.
+ *     Register a watchdog device including handling the legacy
+ *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ *     thus we set it up like that.
  */
 
 int watchdog_dev_register(struct watchdog_device *watchdog)
 {
-       int err;
-
-       /* Only one device can register for /dev/watchdog */
-       if (test_and_set_bit(0, &watchdog_dev_busy)) {
-               pr_err("only one watchdog can use /dev/watchdog\n");
-               return -EBUSY;
+       int err, devno;
+
+       if (watchdog->id == 0) {
+               watchdog_miscdev.parent = watchdog->parent;
+               err = misc_register(&watchdog_miscdev);
+               if (err != 0) {
+                       pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+                               watchdog->info->identity, WATCHDOG_MINOR, err);
+                       if (err == -EBUSY)
+                               pr_err("%s: a legacy watchdog module is probably present.\n",
+                                       watchdog->info->identity);
+                       return err;
+               }
+               old_wdd = watchdog;
        }
 
-       wdd = watchdog;
-
-       err = misc_register(&watchdog_miscdev);
-       if (err != 0) {
-               pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
-                      watchdog->info->identity, WATCHDOG_MINOR, err);
-               goto out;
+       /* Fill in the data structures */
+       devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+       cdev_init(&watchdog->cdev, &watchdog_fops);
+       watchdog->cdev.owner = watchdog->ops->owner;
+
+       /* Add the device */
+       err  = cdev_add(&watchdog->cdev, devno, 1);
+       if (err) {
+               pr_err("watchdog%d unable to add device %d:%d\n",
+                       watchdog->id,  MAJOR(watchdog_devt), watchdog->id);
+               if (watchdog->id == 0) {
+                       misc_deregister(&watchdog_miscdev);
+                       old_wdd = NULL;
+               }
        }
-
-       return 0;
-
-out:
-       wdd = NULL;
-       clear_bit(0, &watchdog_dev_busy);
        return err;
 }
 
 /*
- *     watchdog_dev_unregister:
+ *     watchdog_dev_unregister: unregister a watchdog device
  *     @watchdog: watchdog device
  *
- *     Deregister the /dev/watchdog device.
+ *     Unregister the watchdog and if needed the legacy /dev/watchdog device.
  */
 
 int watchdog_dev_unregister(struct watchdog_device *watchdog)
 {
-       /* Check that a watchdog device was registered in the past */
-       if (!test_bit(0, &watchdog_dev_busy) || !wdd)
-               return -ENODEV;
-
-       /* We can only unregister the watchdog device that was registered */
-       if (watchdog != wdd) {
-               pr_err("%s: watchdog was not registered as /dev/watchdog\n",
-                      watchdog->info->identity);
-               return -ENODEV;
+       mutex_lock(&watchdog->lock);
+       set_bit(WDOG_UNREGISTERED, &watchdog->status);
+       mutex_unlock(&watchdog->lock);
+
+       cdev_del(&watchdog->cdev);
+       if (watchdog->id == 0) {
+               misc_deregister(&watchdog_miscdev);
+               old_wdd = NULL;
        }
-
-       misc_deregister(&watchdog_miscdev);
-       wdd = NULL;
-       clear_bit(0, &watchdog_dev_busy);
        return 0;
 }
+
+/*
+ *     watchdog_dev_init: init dev part of watchdog core
+ *
+ *     Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+       int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+       if (err < 0)
+               pr_err("watchdog: unable to allocate char dev region\n");
+       return err;
+}
+
+/*
+ *     watchdog_dev_exit: exit dev part of watchdog core
+ *
+ *     Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+       unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
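
With one cdev per watchdog, every registered device is reachable under its own node, and the locked wrapper functions above back the usual WDIOC ioctls. A hedged userspace sketch exercising two of them (the /dev/watchdog1 path and the 30-second value are illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog1", O_RDWR);
		int timeout = 30, timeleft = 0;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Routed through watchdog_set_timeout() in the core above. */
		if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
			printf("timeout set to %d s\n", timeout);

		/* Routed through watchdog_get_timeleft(); fails without the op. */
		if (ioctl(fd, WDIOC_GETTIMELEFT, &timeleft) == 0)
			printf("%d s left until reset\n", timeleft);

		if (write(fd, "V", 1) != 1)	/* magic close before exit */
			perror("write");
		close(fd);
		return 0;
	}
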
index 6908e4ce2a0d69aa67ca0e11251d834770bbd68c..7595581d032cc9d9c5a7f04ee12b861dc76051ee 100644 (file)
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                                              handle_edge_irq, "event");
 
                xen_irq_info_evtchn_init(irq, evtchn);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
 
 out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 
                bind_evtchn_to_cpu(evtchn, cpu);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_IPI);
        }
 
  out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 
                bind_evtchn_to_cpu(evtchn, cpu);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_VIRQ);
        }
 
 out:
index b84bf0b6cc34c4fd34bfd35015bd8057f0ddf6b9..18fff88254ebd9bd9e4c76b1df63ecf40dc4e139 100644 (file)
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
 
 #ifdef CONFIG_ACPI
                handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
-               if (!handle)
+               if (!handle && pci_dev->bus->bridge)
                        handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
 #ifdef CONFIG_PCI_IOV
                if (!handle && pci_dev->is_virtfn)
index dcb79521e6c8c256769c75295f90fb3444a8eaec..89f264c67420c2448f9fe029e8193fa438369ae8 100644 (file)
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
 }
 
 /* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
  * returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!)
  */
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
 __setup("nofrontswap", no_frontswap);
 
 static struct frontswap_ops __initdata tmem_frontswap_ops = {
-       .put_page = tmem_frontswap_put_page,
-       .get_page = tmem_frontswap_get_page,
+       .store = tmem_frontswap_store,
+       .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
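
This hunk only tracks the frontswap API rename from put_page/get_page to store/load. For orientation, a hedged sketch of what a minimal backend looks like under the new field names (the no-op example_* callbacks are invented; the field names and callback signatures are inferred from the functions in this file):

	#include <linux/frontswap.h>
	#include <linux/mm.h>

	/* Decline every page: per the comments above, -1 means "not stored". */
	static int example_store(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;
	}

	/* Nothing was ever stored, so nothing can be loaded back. */
	static int example_load(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;
	}

	static void example_invalidate_page(unsigned type, pgoff_t offset) { }
	static void example_invalidate_area(unsigned type) { }
	static void example_init(unsigned type) { }

	static struct frontswap_ops example_frontswap_ops = {
		.store = example_store,
		.load = example_load,
		.invalidate_page = example_invalidate_page,
		.invalidate_area = example_invalidate_area,
		.init = example_init,
	};

	/* A real backend would hand this to frontswap_register_ops() at init. */
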
index a1e6c990cd410efded55c826f03bc5db13839d75..e3dd2a1e2bfc18e47abae82bce7ee60238527c08 100644 (file)
@@ -68,24 +68,6 @@ static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
        return current_fsgid();
 }
 
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-       struct dentry *dentry;
-
-       spin_lock(&inode->i_lock);
-       /* Directory should have only one entry. */
-       BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-       dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-       spin_unlock(&inode->i_lock);
-       return dentry;
-}
-
 static int v9fs_test_inode_dotl(struct inode *inode, void *data)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -415,7 +397,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        if (dir->i_mode & S_ISGID)
                omode |= S_ISGID;
 
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
@@ -793,7 +775,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
                 dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
 
        v9ses = v9fs_inode2v9ses(dir);
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid))
                return PTR_ERR(dfid);
@@ -858,7 +840,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                return -EINVAL;
 
        v9ses = v9fs_inode2v9ses(dir);
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
index 45a0ce45d7b46afa94b1290511bc1f91a9872137..1fceb320d2f22c16bc1a900cb27597d68977dbbd 100644 (file)
 #define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
 #define AFFS_BLOCK(sb, bh, blk)                (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
 
-#ifdef __LITTLE_ENDIAN
-#define BO_EXBITS      0x18UL
-#elif defined(__BIG_ENDIAN)
-#define BO_EXBITS      0x00UL
-#else
-#error Endianness must be known for affs to work.
-#endif
-
 #define AFFS_HEAD(bh)          ((struct affs_head *)(bh)->b_data)
 #define AFFS_TAIL(sb, bh)      ((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail)))
 #define AFFS_ROOT_HEAD(bh)     ((struct affs_root_head *)(bh)->b_data)
index e7f2fad7b4ce7cae2d334456f5d9998e795c917e..55c4c76560537f7fe72d6ff5f429eff666b86789 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -134,9 +134,9 @@ static int aio_setup_ring(struct kioctx *ctx)
        info->mmap_size = nr_pages * PAGE_SIZE;
        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&ctx->mm->mmap_sem);
-       info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
-                                 PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
-                                 0);
+       info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 
+                                       PROT_READ|PROT_WRITE,
+                                       MAP_ANONYMOUS|MAP_PRIVATE, 0);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
@@ -1446,13 +1446,13 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
                ret = compat_rw_copy_check_uvector(type,
                                (struct compat_iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec, 1);
+                               &kiocb->ki_iovec);
        else
 #endif
                ret = rw_copy_check_uvector(type,
                                (struct iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec, 1);
+                               &kiocb->ki_iovec);
        if (ret < 0)
                goto out;
 
index 584620e5dee52b5be4a456fb0572a5227a0ef534..0da90951d2776f827a905337938399ada79e8e69 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
                        return -EPERM;
        }
 
+       if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
+               if (attr->ia_size != inode->i_size)
+                       inode_inc_iversion(inode);
+       }
+
        if ((ia_valid & ATTR_MODE)) {
                umode_t amode = attr->ia_mode;
                /* Flag setting protected by i_mutex */
index e658dd134b95fb375b371a931e739baa95d249a8..1b52956afe33ab07889c3963ce2c41b32133483b 100644 (file)
@@ -329,7 +329,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        if (!size)
                return addr;
 
-       down_write(&current->mm->mmap_sem);
        /*
        * total_size is the size of the ELF (interpreter) image.
        * The _first_ mmap needs to know the full size, otherwise
@@ -340,13 +339,12 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
-               map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
-                       do_munmap(current->mm, map_addr+size, total_size-size);
+                       vm_munmap(map_addr+size, total_size-size);
        } else
-               map_addr = do_mmap(filep, addr, size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-       up_write(&current->mm->mmap_sem);
        return(map_addr);
 }
 
index 6b2daf99fab8bcd91d314f0abd951b8472a092d2..178cb70acc26de80ec3db21a8455e88b7fc0360b 100644 (file)
@@ -562,7 +562,7 @@ static int load_flat_file(struct linux_binprm * bprm,
                                realdatastart = (unsigned long) -ENOMEM;
                        printk("Unable to allocate RAM for process data, errno %d\n",
                                        (int)-realdatastart);
-                       do_munmap(current->mm, textpos, text_len);
+                       vm_munmap(textpos, text_len);
                        ret = realdatastart;
                        goto err;
                }
@@ -586,8 +586,8 @@ static int load_flat_file(struct linux_binprm * bprm,
                }
                if (IS_ERR_VALUE(result)) {
                        printk("Unable to read data+bss, errno %d\n", (int)-result);
-                       do_munmap(current->mm, textpos, text_len);
-                       do_munmap(current->mm, realdatastart, len);
+                       vm_munmap(textpos, text_len);
+                       vm_munmap(realdatastart, len);
                        ret = result;
                        goto err;
                }
@@ -654,7 +654,7 @@ static int load_flat_file(struct linux_binprm * bprm,
                }
                if (IS_ERR_VALUE(result)) {
                        printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-                       do_munmap(current->mm, textpos, text_len + data_len + extra +
+                       vm_munmap(textpos, text_len + data_len + extra +
                                MAX_SHARED_LIBS * sizeof(unsigned long));
                        ret = result;
                        goto err;
index 89b156d85d63c9f29b66413e1558e85a758d0e12..761e2cd8fed16e6046951e50504b8bb9e7acd3e4 100644 (file)
@@ -227,7 +227,11 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
                if (ret > 0) {
                        /* we need an acl */
                        ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
+               } else {
+                       cache_no_acl(inode);
                }
+       } else {
+               cache_no_acl(inode);
        }
 failed:
        posix_acl_release(acl);
index bcec06750232e6cc3de09c62648201547709222b..a383c18e74e86eebaa847d756e3493e7ca3c9bfd 100644 (file)
 #include "delayed-ref.h"
 #include "locking.h"
 
+struct extent_inode_elem {
+       u64 inum;
+       u64 offset;
+       struct extent_inode_elem *next;
+};
+
+static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
+                               struct btrfs_file_extent_item *fi,
+                               u64 extent_item_pos,
+                               struct extent_inode_elem **eie)
+{
+       u64 data_offset;
+       u64 data_len;
+       struct extent_inode_elem *e;
+
+       data_offset = btrfs_file_extent_offset(eb, fi);
+       data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+       if (extent_item_pos < data_offset ||
+           extent_item_pos >= data_offset + data_len)
+               return 1;
+
+       e = kmalloc(sizeof(*e), GFP_NOFS);
+       if (!e)
+               return -ENOMEM;
+
+       e->next = *eie;
+       e->inum = key->objectid;
+       e->offset = key->offset + (extent_item_pos - data_offset);
+       *eie = e;
+
+       return 0;
+}
+
+static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
+                               u64 extent_item_pos,
+                               struct extent_inode_elem **eie)
+{
+       u64 disk_byte;
+       struct btrfs_key key;
+       struct btrfs_file_extent_item *fi;
+       int slot;
+       int nritems;
+       int extent_type;
+       int ret;
+
+       /*
+        * from the shared data ref, we only have the leaf but we need
+        * the key. thus, we must look into all items and see that we
+        * find one (some) with a reference to our extent item.
+        */
+       nritems = btrfs_header_nritems(eb);
+       for (slot = 0; slot < nritems; ++slot) {
+               btrfs_item_key_to_cpu(eb, &key, slot);
+               if (key.type != BTRFS_EXTENT_DATA_KEY)
+                       continue;
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               extent_type = btrfs_file_extent_type(eb, fi);
+               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+                       continue;
+               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
+               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+               if (disk_byte != wanted_disk_byte)
+                       continue;
+
+               ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
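check_extent_in_eb() above records each (inode, file offset) pair that actually covers the queried position inside the extent by prepending a small node to a singly linked list. A compilable userspace sketch of the same range check and prepend; the names and sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct eie_sketch {
        uint64_t inum;
        uint64_t offset;
        struct eie_sketch *next;
};

/* 1 if the position is not covered, 0 on success, -1 on allocation failure */
static int record_hit(struct eie_sketch **head, uint64_t inum, uint64_t file_offset,
                      uint64_t data_offset, uint64_t data_len, uint64_t extent_item_pos)
{
        struct eie_sketch *e;

        if (extent_item_pos < data_offset ||
            extent_item_pos >= data_offset + data_len)
                return 1;       /* this file extent does not cover the position */

        e = malloc(sizeof(*e));
        if (!e)
                return -1;
        e->inum = inum;
        e->offset = file_offset + (extent_item_pos - data_offset);
        e->next = *head;        /* prepend, as check_extent_in_eb() does */
        *head = e;
        return 0;
}

int main(void)
{
        struct eie_sketch *head = NULL, *e, *next;

        record_hit(&head, 257, 0, 0, 8192, 4096);       /* covered: inum 257, offset 4096 */
        record_hit(&head, 258, 0, 8192, 4096, 4096);    /* not covered: skipped */

        for (e = head; e; e = next) {
                next = e->next;
                printf("inum %llu, file offset %llu\n",
                       (unsigned long long)e->inum, (unsigned long long)e->offset);
                free(e);
        }
        return 0;
}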
 /*
  * this structure records all encountered refs on the way up to the root
  */
 struct __prelim_ref {
        struct list_head list;
        u64 root_id;
-       struct btrfs_key key;
+       struct btrfs_key key_for_search;
        int level;
        int count;
+       struct extent_inode_elem *inode_list;
        u64 parent;
        u64 wanted_disk_byte;
 };
 
+/*
+ * the rules for all callers of this function are:
+ * - obtaining the parent is the goal
+ * - if you add a key, you must know that it is a correct key
+ * - if you cannot add the parent or a correct key, then we will look into the
+ *   block later to set a correct key
+ *
+ * delayed refs
+ * ============
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    -   |     -
+ *      key to resolve |    -   |     y    |    y   |     y
+ *  tree block logical |    -   |     -    |    -   |     -
+ *  root for resolving |    y   |     y    |    y   |     y
+ *
+ * - column 1:       we have the parent -> done
+ * - column 2, 3, 4: we use the key to find the parent
+ *
+ * on disk refs (inline or keyed)
+ * ==============================
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    y   |     -
+ *      key to resolve |    -   |     -    |    -   |     y
+ *  tree block logical |    y   |     y    |    y   |     y
+ *  root for resolving |    -   |     y    |    y   |     y
+ *
+ * - column 1, 3: we have the parent -> done
+ * - column 2:    we take the first key from the block to find the parent
+ *                (see __add_missing_keys)
+ * - column 4:    we use the key to find the parent
+ *
+ * additional information that's available but not required to find the parent
+ * block might help in merging entries to gain some speed.
+ */
+
 static int __add_prelim_ref(struct list_head *head, u64 root_id,
-                           struct btrfs_key *key, int level, u64 parent,
-                           u64 wanted_disk_byte, int count)
+                           struct btrfs_key *key, int level,
+                           u64 parent, u64 wanted_disk_byte, int count)
 {
        struct __prelim_ref *ref;
 
@@ -50,10 +163,11 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
        ref->root_id = root_id;
        if (key)
-               ref->key = *key;
+               ref->key_for_search = *key;
        else
-               memset(&ref->key, 0, sizeof(ref->key));
+               memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 
+       ref->inode_list = NULL;
        ref->level = level;
        ref->count = count;
        ref->parent = parent;
@@ -64,52 +178,75 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 }
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
-                               struct ulist *parents,
-                               struct extent_buffer *eb, int level,
-                               u64 wanted_objectid, u64 wanted_disk_byte)
+                               struct ulist *parents, int level,
+                               struct btrfs_key *key_for_search, u64 time_seq,
+                               u64 wanted_disk_byte,
+                               const u64 *extent_item_pos)
 {
-       int ret;
+       int ret = 0;
        int slot;
-       struct btrfs_file_extent_item *fi;
+       struct extent_buffer *eb;
        struct btrfs_key key;
+       struct btrfs_file_extent_item *fi;
+       struct extent_inode_elem *eie = NULL;
        u64 disk_byte;
 
-add_parent:
-       ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
-       if (ret < 0)
-               return ret;
-
-       if (level != 0)
+       if (level != 0) {
+               eb = path->nodes[level];
+               ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
+               if (ret < 0)
+                       return ret;
                return 0;
+       }
 
        /*
-        * if the current leaf is full with EXTENT_DATA items, we must
-        * check the next one if that holds a reference as well.
-        * ref->count cannot be used to skip this check.
-        * repeat this until we don't find any additional EXTENT_DATA items.
+        * We normally enter this function with the path already pointing to
+        * the first item to check. But sometimes, we may enter it with
+        * slot==nritems. In that case, go to the next leaf before we continue.
         */
-       while (1) {
-               ret = btrfs_next_leaf(root, path);
-               if (ret < 0)
-                       return ret;
-               if (ret)
-                       return 0;
+       if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
+               ret = btrfs_next_old_leaf(root, path, time_seq);
 
+       while (!ret) {
                eb = path->nodes[0];
-               for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-                       btrfs_item_key_to_cpu(eb, &key, slot);
-                       if (key.objectid != wanted_objectid ||
-                           key.type != BTRFS_EXTENT_DATA_KEY)
-                               return 0;
-                       fi = btrfs_item_ptr(eb, slot,
-                                               struct btrfs_file_extent_item);
-                       disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-                       if (disk_byte == wanted_disk_byte)
-                               goto add_parent;
+               slot = path->slots[0];
+
+               btrfs_item_key_to_cpu(eb, &key, slot);
+
+               if (key.objectid != key_for_search->objectid ||
+                   key.type != BTRFS_EXTENT_DATA_KEY)
+                       break;
+
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+
+               if (disk_byte == wanted_disk_byte) {
+                       eie = NULL;
+                       if (extent_item_pos) {
+                               ret = check_extent_in_eb(&key, eb, fi,
+                                               *extent_item_pos,
+                                               &eie);
+                               if (ret < 0)
+                                       break;
+                       }
+                       if (!ret) {
+                               ret = ulist_add(parents, eb->start,
+                                               (unsigned long)eie, GFP_NOFS);
+                               if (ret < 0)
+                                       break;
+                               if (!extent_item_pos) {
+                                       ret = btrfs_next_old_leaf(root, path,
+                                                       time_seq);
+                                       continue;
+                               }
+                       }
                }
+               ret = btrfs_next_old_item(root, path, time_seq);
        }
 
-       return 0;
+       if (ret > 0)
+               ret = 0;
+       return ret;
 }
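The rewritten add_all_parents() walks leaf items with btrfs_next_old_item()/btrfs_next_old_leaf() and relies on the usual convention that a positive return means "no more items" rather than an error, which the final "if (ret > 0) ret = 0;" turns back into success. A toy, compilable illustration of that convention; the two "leaves", the item layout, and the key value are made up for the example:

#include <stdio.h>

struct item_sketch { int objectid; int type; };

/* two made-up "leaves" of three items each */
static struct item_sketch leaves[2][3] = {
        { {257, 108}, {257, 108}, {257, 108} },
        { {257, 108}, {258, 108}, {258, 108} },
};

/* step to the next item; 0 on success, 1 when nothing is left */
static int next_item(int *leaf, int *slot)
{
        if (++(*slot) < 3)
                return 0;
        *slot = 0;
        return ++(*leaf) < 2 ? 0 : 1;
}

int main(void)
{
        int leaf = 0, slot = 0, ret = 0, matched = 0;

        while (!ret) {
                struct item_sketch *it = &leaves[leaf][slot];

                if (it->objectid != 257 || it->type != 108)
                        break;          /* key no longer matches: done */
                matched++;
                ret = next_item(&leaf, &slot);
        }
        if (ret > 0)                    /* ran off the end: success, not an error */
                ret = 0;
        printf("matched %d items, ret=%d\n", matched, ret);
        return ret;
}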
 
 /*
@@ -118,13 +255,14 @@ add_parent:
  */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                                        int search_commit_root,
+                                       u64 time_seq,
                                        struct __prelim_ref *ref,
-                                       struct ulist *parents)
+                                       struct ulist *parents,
+                                       const u64 *extent_item_pos)
 {
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_key root_key;
-       struct btrfs_key key = {0};
        struct extent_buffer *eb;
        int ret = 0;
        int root_level;
@@ -152,36 +290,30 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        path->lowest_level = level;
-       ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+       ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
        pr_debug("search slot in root %llu (level %d, ref count %d) returned "
                 "%d for key (%llu %u %llu)\n",
                 (unsigned long long)ref->root_id, level, ref->count, ret,
-                (unsigned long long)ref->key.objectid, ref->key.type,
-                (unsigned long long)ref->key.offset);
+                (unsigned long long)ref->key_for_search.objectid,
+                ref->key_for_search.type,
+                (unsigned long long)ref->key_for_search.offset);
        if (ret < 0)
                goto out;
 
        eb = path->nodes[level];
-       if (!eb) {
-               WARN_ON(1);
-               ret = 1;
-               goto out;
-       }
-
-       if (level == 0) {
-               if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret)
-                               goto out;
-                       eb = path->nodes[0];
+       while (!eb) {
+               if (!level) {
+                       WARN_ON(1);
+                       ret = 1;
+                       goto out;
                }
-
-               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+               level--;
+               eb = path->nodes[level];
        }
 
-       /* the last two parameters will only be used for level == 0 */
-       ret = add_all_parents(root, path, parents, eb, level, key.objectid,
-                               ref->wanted_disk_byte);
+       ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
+                               time_seq, ref->wanted_disk_byte,
+                               extent_item_pos);
 out:
        btrfs_free_path(path);
        return ret;
@@ -191,8 +323,9 @@ out:
  * resolve all indirect backrefs from the list
  */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
-                                  int search_commit_root,
-                                  struct list_head *head)
+                                  int search_commit_root, u64 time_seq,
+                                  struct list_head *head,
+                                  const u64 *extent_item_pos)
 {
        int err;
        int ret = 0;
@@ -201,6 +334,7 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
        struct __prelim_ref *new_ref;
        struct ulist *parents;
        struct ulist_node *node;
+       struct ulist_iterator uiter;
 
        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
@@ -217,7 +351,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                if (ref->count == 0)
                        continue;
                err = __resolve_indirect_ref(fs_info, search_commit_root,
-                                            ref, parents);
+                                            time_seq, ref, parents,
+                                            extent_item_pos);
                if (err) {
                        if (ret == 0)
                                ret = err;
@@ -225,11 +360,14 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                }
 
                /* we put the first parent into the ref at hand */
-               node = ulist_next(parents, NULL);
+               ULIST_ITER_INIT(&uiter);
+               node = ulist_next(parents, &uiter);
                ref->parent = node ? node->val : 0;
+               ref->inode_list =
+                       node ? (struct extent_inode_elem *)node->aux : 0;
 
                /* additional parents require new refs being added here */
-               while ((node = ulist_next(parents, node))) {
+               while ((node = ulist_next(parents, &uiter))) {
                        new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
                        if (!new_ref) {
                                ret = -ENOMEM;
@@ -237,6 +375,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                        }
                        memcpy(new_ref, ref, sizeof(*ref));
                        new_ref->parent = node->val;
+                       new_ref->inode_list =
+                                       (struct extent_inode_elem *)node->aux;
                        list_add(&new_ref->list, &ref->list);
                }
                ulist_reinit(parents);
@@ -246,10 +386,65 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
        return ret;
 }
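The ulist walks above also change from the old ulist_next(list, node) form to an explicit struct ulist_iterator initialised with ULIST_ITER_INIT(), so the iteration cursor lives outside the list nodes themselves. A minimal, compilable sketch of that iterator shape, simplified to a flat array (the real ulist is more involved, and all *_sketch names are invented):

#include <stddef.h>
#include <stdio.h>

struct unode_sketch { unsigned long val; unsigned long aux; };

struct ulist_sketch {
        struct unode_sketch nodes[8];
        size_t nnodes;
};

struct ulist_iterator_sketch { size_t pos; };

#define ULIST_ITER_INIT_SKETCH(it)      ((it)->pos = 0)

static struct unode_sketch *ulist_next_sketch(struct ulist_sketch *ul,
                                              struct ulist_iterator_sketch *it)
{
        if (it->pos >= ul->nnodes)
                return NULL;
        return &ul->nodes[it->pos++];
}

int main(void)
{
        struct ulist_sketch parents = {
                .nodes = { { 4096, 0 }, { 8192, 0 }, { 12288, 0 } },
                .nnodes = 3,
        };
        struct ulist_iterator_sketch uiter;
        struct unode_sketch *node;

        ULIST_ITER_INIT_SKETCH(&uiter);
        while ((node = ulist_next_sketch(&parents, &uiter)))
                printf("parent block %lu\n", node->val);
        return 0;
}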
 
+static inline int ref_for_same_block(struct __prelim_ref *ref1,
+                                    struct __prelim_ref *ref2)
+{
+       if (ref1->level != ref2->level)
+               return 0;
+       if (ref1->root_id != ref2->root_id)
+               return 0;
+       if (ref1->key_for_search.type != ref2->key_for_search.type)
+               return 0;
+       if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
+               return 0;
+       if (ref1->key_for_search.offset != ref2->key_for_search.offset)
+               return 0;
+       if (ref1->parent != ref2->parent)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * read tree blocks and add keys where required.
+ */
+static int __add_missing_keys(struct btrfs_fs_info *fs_info,
+                             struct list_head *head)
+{
+       struct list_head *pos;
+       struct extent_buffer *eb;
+
+       list_for_each(pos, head) {
+               struct __prelim_ref *ref;
+               ref = list_entry(pos, struct __prelim_ref, list);
+
+               if (ref->parent)
+                       continue;
+               if (ref->key_for_search.type)
+                       continue;
+               BUG_ON(!ref->wanted_disk_byte);
+               eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
+                                    fs_info->tree_root->leafsize, 0);
+               BUG_ON(!eb);
+               btrfs_tree_read_lock(eb);
+               if (btrfs_header_level(eb) == 0)
+                       btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
+               else
+                       btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
+               btrfs_tree_read_unlock(eb);
+               free_extent_buffer(eb);
+       }
+       return 0;
+}
+
 /*
  * merge two lists of backrefs and adjust counts accordingly
  *
  * mode = 1: merge identical keys, if key is set
+ *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
+ *           additionally, we could even add a key range for the blocks we
+ *           looked into to merge even more (-> replace unresolved refs by those
+ *           having a parent).
  * mode = 2: merge identical parents
  */
 static int __merge_refs(struct list_head *head, int mode)
@@ -263,20 +458,21 @@ static int __merge_refs(struct list_head *head, int mode)
 
                ref1 = list_entry(pos1, struct __prelim_ref, list);
 
-               if (mode == 1 && ref1->key.type == 0)
-                       continue;
                for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
                     pos2 = n2, n2 = pos2->next) {
                        struct __prelim_ref *ref2;
+                       struct __prelim_ref *xchg;
 
                        ref2 = list_entry(pos2, struct __prelim_ref, list);
 
                        if (mode == 1) {
-                               if (memcmp(&ref1->key, &ref2->key,
-                                          sizeof(ref1->key)) ||
-                                   ref1->level != ref2->level ||
-                                   ref1->root_id != ref2->root_id)
+                               if (!ref_for_same_block(ref1, ref2))
                                        continue;
+                               if (!ref1->parent && ref2->parent) {
+                                       xchg = ref1;
+                                       ref1 = ref2;
+                                       ref2 = xchg;
+                               }
                                ref1->count += ref2->count;
                        } else {
                                if (ref1->parent != ref2->parent)
@@ -296,16 +492,17 @@ static int __merge_refs(struct list_head *head, int mode)
  * smaller or equal that seq to the list
  */
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
-                             struct btrfs_key *info_key,
                              struct list_head *prefs)
 {
        struct btrfs_delayed_extent_op *extent_op = head->extent_op;
        struct rb_node *n = &head->node.rb_node;
+       struct btrfs_key key;
+       struct btrfs_key op_key = {0};
        int sgn;
        int ret = 0;
 
        if (extent_op && extent_op->update_key)
-               btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+               btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
        while ((n = rb_prev(n))) {
                struct btrfs_delayed_ref_node *node;
@@ -337,7 +534,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                        struct btrfs_delayed_tree_ref *ref;
 
                        ref = btrfs_delayed_node_to_tree_ref(node);
-                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                       ret = __add_prelim_ref(prefs, ref->root, &op_key,
                                               ref->level + 1, 0, node->bytenr,
                                               node->ref_mod * sgn);
                        break;
@@ -346,7 +543,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                        struct btrfs_delayed_tree_ref *ref;
 
                        ref = btrfs_delayed_node_to_tree_ref(node);
-                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                       ret = __add_prelim_ref(prefs, ref->root, NULL,
                                               ref->level + 1, ref->parent,
                                               node->bytenr,
                                               node->ref_mod * sgn);
@@ -354,8 +551,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                }
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_delayed_data_ref *ref;
-                       struct btrfs_key key;
-
                        ref = btrfs_delayed_node_to_data_ref(node);
 
                        key.objectid = ref->objectid;
@@ -368,7 +563,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                }
                case BTRFS_SHARED_DATA_REF_KEY: {
                        struct btrfs_delayed_data_ref *ref;
-                       struct btrfs_key key;
 
                        ref = btrfs_delayed_node_to_data_ref(node);
 
@@ -394,8 +588,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
  */
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                             struct btrfs_path *path, u64 bytenr,
-                            struct btrfs_key *info_key, int *info_level,
-                            struct list_head *prefs)
+                            int *info_level, struct list_head *prefs)
 {
        int ret = 0;
        int slot;
@@ -411,7 +604,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
         * enumerate all inline refs
         */
        leaf = path->nodes[0];
-       slot = path->slots[0] - 1;
+       slot = path->slots[0];
 
        item_size = btrfs_item_size_nr(leaf, slot);
        BUG_ON(item_size < sizeof(*ei));
@@ -424,12 +617,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;
-               struct btrfs_disk_key disk_key;
 
                info = (struct btrfs_tree_block_info *)ptr;
                *info_level = btrfs_tree_block_level(leaf, info);
-               btrfs_tree_block_key(leaf, info, &disk_key);
-               btrfs_disk_key_to_cpu(info_key, &disk_key);
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
@@ -447,7 +637,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 
                switch (type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, 0, info_key,
+                       ret = __add_prelim_ref(prefs, 0, NULL,
                                                *info_level + 1, offset,
                                                bytenr, 1);
                        break;
@@ -462,8 +652,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, offset, info_key,
-                                              *info_level + 1, 0, bytenr, 1);
+                       ret = __add_prelim_ref(prefs, offset, NULL,
+                                              *info_level + 1, 0,
+                                              bytenr, 1);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
@@ -477,8 +668,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
                        root = btrfs_extent_data_ref_root(leaf, dref);
-                       ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
-                                               count);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+                                              bytenr, count);
                        break;
                }
                default:
@@ -496,8 +687,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
  */
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                            struct btrfs_path *path, u64 bytenr,
-                           struct btrfs_key *info_key, int info_level,
-                           struct list_head *prefs)
+                           int info_level, struct list_head *prefs)
 {
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;
@@ -527,7 +717,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 
                switch (key.type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, 0, info_key,
+                       ret = __add_prelim_ref(prefs, 0, NULL,
                                                info_level + 1, key.offset,
                                                bytenr, 1);
                        break;
@@ -543,8 +733,9 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, key.offset, info_key,
-                                               info_level + 1, 0, bytenr, 1);
+                       ret = __add_prelim_ref(prefs, key.offset, NULL,
+                                              info_level + 1, 0,
+                                              bytenr, 1);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
@@ -560,7 +751,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
                        root = btrfs_extent_data_ref_root(leaf, dref);
                        ret = __add_prelim_ref(prefs, root, &key, 0, 0,
-                                               bytenr, count);
+                                              bytenr, count);
                        break;
                }
                default:
@@ -582,11 +773,12 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
-                            u64 seq, struct ulist *refs, struct ulist *roots)
+                            u64 delayed_ref_seq, u64 time_seq,
+                            struct ulist *refs, struct ulist *roots,
+                            const u64 *extent_item_pos)
 {
        struct btrfs_key key;
        struct btrfs_path *path;
-       struct btrfs_key info_key = { 0 };
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
        struct btrfs_delayed_ref_head *head;
        int info_level = 0;
@@ -645,8 +837,9 @@ again:
                                btrfs_put_delayed_ref(&head->node);
                                goto again;
                        }
-                       ret = __add_delayed_refs(head, seq, &info_key,
+                       ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
+                       mutex_unlock(&head->mutex);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
                                goto out;
@@ -659,16 +852,17 @@ again:
                struct extent_buffer *leaf;
                int slot;
 
+               path->slots[0]--;
                leaf = path->nodes[0];
-               slot = path->slots[0] - 1;
+               slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid == bytenr &&
                    key.type == BTRFS_EXTENT_ITEM_KEY) {
                        ret = __add_inline_refs(fs_info, path, bytenr,
-                                               &info_key, &info_level, &prefs);
+                                               &info_level, &prefs);
                        if (ret)
                                goto out;
-                       ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+                       ret = __add_keyed_refs(fs_info, path, bytenr,
                                               info_level, &prefs);
                        if (ret)
                                goto out;
@@ -676,21 +870,18 @@ again:
        }
        btrfs_release_path(path);
 
-       /*
-        * when adding the delayed refs above, the info_key might not have
-        * been known yet. Go over the list and replace the missing keys
-        */
-       list_for_each_entry(ref, &prefs_delayed, list) {
-               if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
-                       memcpy(&ref->key, &info_key, sizeof(ref->key));
-       }
        list_splice_init(&prefs_delayed, &prefs);
 
+       ret = __add_missing_keys(fs_info, &prefs);
+       if (ret)
+               goto out;
+
        ret = __merge_refs(&prefs, 1);
        if (ret)
                goto out;
 
-       ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
+       ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
+                                     &prefs, extent_item_pos);
        if (ret)
                goto out;
 
@@ -709,15 +900,39 @@ again:
                        BUG_ON(ret < 0);
                }
                if (ref->count && ref->parent) {
-                       ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+                       struct extent_inode_elem *eie = NULL;
+                       if (extent_item_pos && !ref->inode_list) {
+                               u32 bsz;
+                               struct extent_buffer *eb;
+                               bsz = btrfs_level_size(fs_info->extent_root,
+                                                       info_level);
+                               eb = read_tree_block(fs_info->extent_root,
+                                                          ref->parent, bsz, 0);
+                               BUG_ON(!eb);
+                               ret = find_extent_in_eb(eb, bytenr,
+                                                       *extent_item_pos, &eie);
+                               ref->inode_list = eie;
+                               free_extent_buffer(eb);
+                       }
+                       ret = ulist_add_merge(refs, ref->parent,
+                                             (unsigned long)ref->inode_list,
+                                             (unsigned long *)&eie, GFP_NOFS);
+                       if (!ret && extent_item_pos) {
+                               /*
+                                * we've recorded that parent, so we must extend
+                                * its inode list here
+                                */
+                               BUG_ON(!eie);
+                               while (eie->next)
+                                       eie = eie->next;
+                               eie->next = ref->inode_list;
+                       }
                        BUG_ON(ret < 0);
                }
                kfree(ref);
        }
 
 out:
-       if (head)
-               mutex_unlock(&head->mutex);
        btrfs_free_path(path);
        while (!list_empty(&prefs)) {
                ref = list_first_entry(&prefs, struct __prelim_ref, list);
@@ -734,6 +949,28 @@ out:
        return ret;
 }
 
+static void free_leaf_list(struct ulist *blocks)
+{
+       struct ulist_node *node = NULL;
+       struct extent_inode_elem *eie;
+       struct extent_inode_elem *eie_next;
+       struct ulist_iterator uiter;
+
+       ULIST_ITER_INIT(&uiter);
+       while ((node = ulist_next(blocks, &uiter))) {
+               if (!node->aux)
+                       continue;
+               eie = (struct extent_inode_elem *)node->aux;
+               for (; eie; eie = eie_next) {
+                       eie_next = eie->next;
+                       kfree(eie);
+               }
+               node->aux = 0;
+       }
+
+       ulist_free(blocks);
+}
+
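free_leaf_list() walks the ulist and frees the extent_inode_elem chain attached to each node; the chain head is stashed in the node's unsigned long aux field and cast back to a pointer, matching the cast used where ulist_add_merge() stores it. A small compilable illustration of parking a pointer in an unsigned long and freeing the chain from it; the types and names are sketch-only, and the unsigned long width assumption mirrors the kernel's aux field:

#include <stdio.h>
#include <stdlib.h>

struct eie_sketch {
        unsigned long inum;
        struct eie_sketch *next;
};

struct unode_sketch {
        unsigned long val;
        unsigned long aux;      /* holds a struct eie_sketch * cast to unsigned long */
};

static void free_aux_chain(struct unode_sketch *node)
{
        struct eie_sketch *eie = (struct eie_sketch *)node->aux;
        struct eie_sketch *next;

        for (; eie; eie = next) {
                next = eie->next;
                free(eie);
        }
        node->aux = 0;
}

int main(void)
{
        struct unode_sketch node = { .val = 4096, .aux = 0 };
        struct eie_sketch *e;

        /* build a two-element chain and park its head in node.aux */
        e = calloc(1, sizeof(*e));
        if (!e)
                return 1;
        e->inum = 257;
        e->next = calloc(1, sizeof(*e));
        node.aux = (unsigned long)e;

        free_aux_chain(&node);
        printf("aux after free: %lu\n", node.aux);
        return 0;
}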
 /*
  * Finds all leafs with a reference to the specified combination of bytenr and
  * offset. key_list_head will point to a list of corresponding keys (caller must
@@ -744,7 +981,9 @@ out:
  */
 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **leafs)
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **leafs,
+                               const u64 *extent_item_pos)
 {
        struct ulist *tmp;
        int ret;
@@ -758,11 +997,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+       ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+                               time_seq, *leafs, tmp, extent_item_pos);
        ulist_free(tmp);
 
        if (ret < 0 && ret != -ENOENT) {
-               ulist_free(*leafs);
+               free_leaf_list(*leafs);
                return ret;
        }
 
@@ -784,10 +1024,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
  */
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **roots)
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **roots)
 {
        struct ulist *tmp;
        struct ulist_node *node = NULL;
+       struct ulist_iterator uiter;
        int ret;
 
        tmp = ulist_alloc(GFP_NOFS);
@@ -799,15 +1041,16 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
+       ULIST_ITER_INIT(&uiter);
        while (1) {
-               ret = find_parent_nodes(trans, fs_info, bytenr, seq,
-                                       tmp, *roots);
+               ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+                                       time_seq, tmp, *roots, NULL);
                if (ret < 0 && ret != -ENOENT) {
                        ulist_free(tmp);
                        ulist_free(*roots);
                        return ret;
                }
-               node = ulist_next(tmp, node);
+               node = ulist_next(tmp, &uiter);
                if (!node)
                        break;
                bytenr = node->val;
@@ -1093,67 +1336,25 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        return 0;
 }
 
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
-                               u64 orig_extent_item_objectid,
-                               u64 extent_item_pos, u64 root,
+static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
+                               u64 root, u64 extent_item_objectid,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
-       u64 disk_byte;
-       struct btrfs_key key;
-       struct btrfs_file_extent_item *fi;
-       struct extent_buffer *eb;
-       int slot;
-       int nritems;
+       struct extent_inode_elem *eie;
        int ret = 0;
-       int extent_type;
-       u64 data_offset;
-       u64 data_len;
-
-       eb = read_tree_block(fs_info->tree_root, logical,
-                               fs_info->tree_root->leafsize, 0);
-       if (!eb)
-               return -EIO;
-
-       /*
-        * from the shared data ref, we only have the leaf but we need
-        * the key. thus, we must look into all items and see that we
-        * find one (some) with a reference to our extent item.
-        */
-       nritems = btrfs_header_nritems(eb);
-       for (slot = 0; slot < nritems; ++slot) {
-               btrfs_item_key_to_cpu(eb, &key, slot);
-               if (key.type != BTRFS_EXTENT_DATA_KEY)
-                       continue;
-               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               extent_type = btrfs_file_extent_type(eb, fi);
-               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
-                       continue;
-               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
-               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-               if (disk_byte != orig_extent_item_objectid)
-                       continue;
-
-               data_offset = btrfs_file_extent_offset(eb, fi);
-               data_len = btrfs_file_extent_num_bytes(eb, fi);
-
-               if (extent_item_pos < data_offset ||
-                   extent_item_pos >= data_offset + data_len)
-                       continue;
 
+       for (eie = inode_list; eie; eie = eie->next) {
                pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
-                               "root %llu\n", orig_extent_item_objectid,
-                               key.objectid, key.offset, root);
-               ret = iterate(key.objectid,
-                               key.offset + (extent_item_pos - data_offset),
-                               root, ctx);
+                        "root %llu\n", extent_item_objectid,
+                        eie->inum, eie->offset, root);
+               ret = iterate(eie->inum, eie->offset, root, ctx);
                if (ret) {
-                       pr_debug("stopping iteration because ret=%d\n", ret);
+                       pr_debug("stopping iteration for %llu due to ret=%d\n",
+                                extent_item_objectid, ret);
                        break;
                }
        }
 
-       free_extent_buffer(eb);
-
        return ret;
 }
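With the inode list resolved up front, iterate_leaf_refs() shrinks to a plain walk over extent_inode_elem entries, invoking the caller's callback for each one and stopping as soon as the callback returns non-zero. A compilable sketch of that callback-iteration contract; the function pointer type and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct eie_sketch {
        uint64_t inum;
        uint64_t offset;
        struct eie_sketch *next;
};

typedef int (*iterate_inodes_fn)(uint64_t inum, uint64_t offset, uint64_t root, void *ctx);

static int iterate_refs_sketch(struct eie_sketch *list, uint64_t root,
                               iterate_inodes_fn iterate, void *ctx)
{
        struct eie_sketch *eie;
        int ret = 0;

        for (eie = list; eie; eie = eie->next) {
                ret = iterate(eie->inum, eie->offset, root, ctx);
                if (ret)
                        break;          /* a non-zero return stops the iteration */
        }
        return ret;
}

static int print_ref(uint64_t inum, uint64_t offset, uint64_t root, void *ctx)
{
        int *count = ctx;

        printf("root %llu: inode %llu at offset %llu\n",
               (unsigned long long)root, (unsigned long long)inum,
               (unsigned long long)offset);
        return ++(*count) >= 2;         /* stop after two entries */
}

int main(void)
{
        struct eie_sketch c = { 259, 8192, NULL };
        struct eie_sketch b = { 258, 4096, &c };
        struct eie_sketch a = { 257, 0, &b };
        int count = 0;

        iterate_refs_sketch(&a, 5, print_ref, &count);
        printf("visited %d entries before stopping\n", count);
        return 0;
}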
 
@@ -1175,7 +1376,10 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
        struct ulist *roots = NULL;
        struct ulist_node *ref_node = NULL;
        struct ulist_node *root_node = NULL;
-       struct seq_list seq_elem;
+       struct seq_list seq_elem = {};
+       struct seq_list tree_mod_seq_elem = {};
+       struct ulist_iterator ref_uiter;
+       struct ulist_iterator root_uiter;
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
 
        pr_debug("resolving all inodes for extent %llu\n",
@@ -1192,34 +1396,41 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                spin_lock(&delayed_refs->lock);
                btrfs_get_delayed_seq(delayed_refs, &seq_elem);
                spin_unlock(&delayed_refs->lock);
+               btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
        }
 
        ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
-                                  extent_item_pos, seq_elem.seq,
-                                  &refs);
-
+                                  seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+                                  &extent_item_pos);
        if (ret)
                goto out;
 
-       while (!ret && (ref_node = ulist_next(refs, ref_node))) {
-               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
-                                               seq_elem.seq, &roots);
+       ULIST_ITER_INIT(&ref_uiter);
+       while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
+               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
+                                               seq_elem.seq,
+                                               tree_mod_seq_elem.seq, &roots);
                if (ret)
                        break;
-               while (!ret && (root_node = ulist_next(roots, root_node))) {
-                       pr_debug("root %llu references leaf %llu\n",
-                                       root_node->val, ref_node->val);
-                       ret = iterate_leaf_refs(fs_info, ref_node->val,
-                                               extent_item_objectid,
-                                               extent_item_pos, root_node->val,
-                                               iterate, ctx);
+               ULIST_ITER_INIT(&root_uiter);
+               while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
+                       pr_debug("root %llu references leaf %llu, data list "
+                                "%#lx\n", root_node->val, ref_node->val,
+                                ref_node->aux);
+                       ret = iterate_leaf_refs(
+                               (struct extent_inode_elem *)ref_node->aux,
+                               root_node->val, extent_item_objectid,
+                               iterate, ctx);
                }
+               ulist_free(roots);
+               roots = NULL;
        }
 
-       ulist_free(refs);
+       free_leaf_list(refs);
        ulist_free(roots);
 out:
        if (!search_commit_root) {
+               btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
                btrfs_put_delayed_seq(delayed_refs, &seq_elem);
                btrfs_end_transaction(trans, fs_info->extent_root);
        }
index 57ea2e959e4dcfaba89e4ee0b833f5744c3639d3..c18d8ac7b795da487c4a526979954e91cbddf52b 100644 (file)
@@ -58,7 +58,8 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **roots);
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **roots);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
index 9b9b15fd5204347c5ef2931fb186af679cb0d369..12394a90d60fb7115d5b8feee0e7df701f9e743d 100644 (file)
 #include "ordered-data.h"
 #include "delayed-inode.h"
 
+/*
+ * ordered_data_close is set by truncate when a file that used
+ * to have good data has been truncated to zero.  When it is set
+ * the btrfs file release call will add this inode to the
+ * ordered operations list so that we make sure to flush out any
+ * new data the application may have written before commit.
+ */
+#define BTRFS_INODE_ORDERED_DATA_CLOSE         0
+#define BTRFS_INODE_ORPHAN_META_RESERVED       1
+#define BTRFS_INODE_DUMMY                      2
+#define BTRFS_INODE_IN_DEFRAG                  3
+#define BTRFS_INODE_DELALLOC_META_RESERVED     4
+#define BTRFS_INODE_HAS_ORPHAN_ITEM            5
+#define BTRFS_INODE_HAS_ASYNC_EXTENT           6
+
 /* in memory btrfs inode */
 struct btrfs_inode {
        /* which subvolume this inode belongs to */
@@ -57,9 +72,6 @@ struct btrfs_inode {
        /* used to order data wrt metadata */
        struct btrfs_ordered_inode_tree ordered_tree;
 
-       /* for keeping track of orphaned inodes */
-       struct list_head i_orphan;
-
        /* list of all the delalloc inodes in the FS.  There are times we need
         * to write all the delalloc pages to disk, and this list is used
         * to walk them all.
@@ -78,14 +90,13 @@ struct btrfs_inode {
        /* the space_info for where this inode's data allocations are done */
        struct btrfs_space_info *space_info;
 
+       unsigned long runtime_flags;
+
        /* full 64 bit generation number, struct vfs_inode doesn't have a big
         * enough field for this.
         */
        u64 generation;
 
-       /* sequence number for NFS changes */
-       u64 sequence;
-
        /*
         * transid of the trans_handle that last modified this inode
         */
@@ -144,23 +155,10 @@ struct btrfs_inode {
        unsigned outstanding_extents;
        unsigned reserved_extents;
 
-       /*
-        * ordered_data_close is set by truncate when a file that used
-        * to have good data has been truncated to zero.  When it is set
-        * the btrfs file release call will add this inode to the
-        * ordered operations list so that we make sure to flush out any
-        * new data the application may have written before commit.
-        */
-       unsigned ordered_data_close:1;
-       unsigned orphan_meta_reserved:1;
-       unsigned dummy_inode:1;
-       unsigned in_defrag:1;
-       unsigned delalloc_meta_reserved:1;
-
        /*
         * always compress this one file
         */
-       unsigned force_compress:4;
+       unsigned force_compress;
 
        struct btrfs_delayed_node *delayed_node;
 
@@ -202,4 +200,17 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
        return false;
 }
 
+static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       int ret = 0;
+
+       mutex_lock(&root->log_mutex);
+       if (BTRFS_I(inode)->logged_trans == generation &&
+           BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
+               ret = 1;
+       mutex_unlock(&root->log_mutex);
+       return ret;
+}
+
 #endif
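The per-inode booleans that used to be packed bitfields (ordered_data_close, orphan_meta_reserved, and so on) become numbered bits in a single unsigned long runtime_flags word, which the rest of the series can manipulate with atomic bit helpers in the set_bit()/clear_bit()/test_bit() style; unlike adjacent bitfields, independent bits can then be updated concurrently without clobbering one another. A hedged userspace approximation of that pattern using compiler atomics, with the helper names invented for the sketch:

#include <stdio.h>

enum {
        INODE_ORDERED_DATA_CLOSE = 0,
        INODE_ORPHAN_META_RESERVED = 1,
        INODE_IN_DEFRAG = 3,
};

static inline void set_bit_sketch(int nr, unsigned long *addr)
{
        __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static inline void clear_bit_sketch(int nr, unsigned long *addr)
{
        __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static inline int test_bit_sketch(int nr, const unsigned long *addr)
{
        return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
        unsigned long runtime_flags = 0;

        set_bit_sketch(INODE_IN_DEFRAG, &runtime_flags);
        set_bit_sketch(INODE_ORDERED_DATA_CLOSE, &runtime_flags);
        clear_bit_sketch(INODE_ORDERED_DATA_CLOSE, &runtime_flags);

        printf("in_defrag=%d ordered_data_close=%d\n",
               test_bit_sketch(INODE_IN_DEFRAG, &runtime_flags),
               test_bit_sketch(INODE_ORDERED_DATA_CLOSE, &runtime_flags));
        return 0;
}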
index c053e90f2006f580ed4f8a4440fb520639c3edd6..da6e9364a5e3caa48b67c5e17f95a21ded5ee9ec 100644 (file)
@@ -93,6 +93,7 @@
 #include "print-tree.h"
 #include "locking.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)   /* in characters,
                                                         * excluding " [...]" */
-#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
-
 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
 
 /*
@@ -210,8 +209,9 @@ struct btrfsic_block_data_ctx {
        u64 dev_bytenr;         /* physical bytenr on device */
        u32 len;
        struct btrfsic_dev_state *dev;
-       char *data;
-       struct buffer_head *bh; /* do not use if set to NULL */
+       char **datav;
+       struct page **pagev;
+       void *mem_to_free;
 };
 
 /* This structure is used to implement recursion without occupying
@@ -243,6 +243,8 @@ struct btrfsic_state {
        struct btrfs_root *root;
        u64 max_superblock_generation;
        struct btrfsic_block *latest_superblock;
+       u32 metablock_size;
+       u32 datablock_size;
 };
 
 static void btrfsic_block_init(struct btrfsic_block *b);
@@ -290,8 +292,10 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 static int btrfsic_process_metablock(struct btrfsic_state *state,
                                     struct btrfsic_block *block,
                                     struct btrfsic_block_data_ctx *block_ctx,
-                                    struct btrfs_header *hdr,
                                     int limit_nesting, int force_iodone_flag);
+static void btrfsic_read_from_block_data(
+       struct btrfsic_block_data_ctx *block_ctx,
+       void *dst, u32 offset, size_t len);
 static int btrfsic_create_link_to_next_block(
                struct btrfsic_state *state,
                struct btrfsic_block *block,
@@ -318,12 +322,13 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
 static int btrfsic_read_block(struct btrfsic_state *state,
                              struct btrfsic_block_data_ctx *block_ctx);
 static void btrfsic_dump_database(struct btrfsic_state *state);
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-                                    const u8 *data, unsigned int size);
+                                    char **datav, unsigned int num_pages);
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-                                         u64 dev_bytenr, u8 *mapped_data,
-                                         unsigned int len, struct bio *bio,
-                                         int *bio_is_patched,
+                                         u64 dev_bytenr, char **mapped_datav,
+                                         unsigned int num_pages,
+                                         struct bio *bio, int *bio_is_patched,
                                          struct buffer_head *bh,
                                          int submit_bio_bh_rw);
 static int btrfsic_process_written_superblock(
@@ -375,7 +380,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                                           u64 bytenr,
                                           struct btrfsic_dev_state *dev_state,
-                                          u64 dev_bytenr, char *data);
+                                          u64 dev_bytenr);
 
 static struct mutex btrfsic_mutex;
 static int btrfsic_is_initialized;
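The block context in check-integrity now carries an array of page-sized buffers (datav/pagev) instead of one flat data buffer, and the new btrfsic_read_from_block_data() copies arbitrary (offset, len) ranges out of those pages. A compilable userspace sketch of that kind of page-crossing copy; the page size constant and the function name are illustrative:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ_SKETCH 4096

/* copy len bytes starting at offset out of a vector of page-sized buffers */
static void read_from_pages(char **pagev, void *dst, size_t offset, size_t len)
{
        char *out = dst;

        while (len) {
                size_t page = offset / PAGE_SZ_SKETCH;
                size_t in_page = offset % PAGE_SZ_SKETCH;
                size_t chunk = PAGE_SZ_SKETCH - in_page;

                if (chunk > len)
                        chunk = len;
                memcpy(out, pagev[page] + in_page, chunk);
                out += chunk;
                offset += chunk;
                len -= chunk;
        }
}

int main(void)
{
        static char page0[PAGE_SZ_SKETCH], page1[PAGE_SZ_SKETCH];
        char *pagev[2] = { page0, page1 };
        char buf[8];

        memset(page0, 'A', sizeof(page0));
        memset(page1, 'B', sizeof(page1));

        /* read a range that straddles the page boundary */
        read_from_pages(pagev, buf, PAGE_SZ_SKETCH - 4, sizeof(buf) - 1);
        buf[7] = '\0';
        printf("%s\n", buf);    /* prints AAAABBB */
        return 0;
}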
@@ -651,7 +656,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
        int pass;
 
        BUG_ON(NULL == state);
-       selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
+       selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
        if (NULL == selected_super) {
                printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
                return -1;
@@ -718,7 +723,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -727,9 +732,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                        struct btrfsic_block *next_block;
                        struct btrfsic_block_data_ctx tmp_next_block_ctx;
                        struct btrfsic_block_link *l;
-                       struct btrfs_header *hdr;
 
-                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               state->metablock_size,
                                                &tmp_next_block_ctx,
                                                mirror_num);
                        if (ret) {
@@ -758,7 +763,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                        BUG_ON(NULL == l);
 
                        ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-                       if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                       if (ret < (int)PAGE_CACHE_SIZE) {
                                printk(KERN_INFO
                                       "btrfsic: read @logical %llu failed!\n",
                                       (unsigned long long)
@@ -768,11 +773,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                                return -1;
                        }
 
-                       hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
                        ret = btrfsic_process_metablock(state,
                                                        next_block,
                                                        &tmp_next_block_ctx,
-                                                       hdr,
                                                        BTRFS_MAX_LEVEL + 3, 1);
                        btrfsic_release_block_ctx(&tmp_next_block_ctx);
                }
@@ -799,7 +802,10 @@ static int btrfsic_process_superblock_dev_mirror(
 
        /* super block bytenr is always the unmapped device bytenr */
        dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
-       bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
+       if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
+               return -1;
+       bh = __bread(superblock_bdev, dev_bytenr / 4096,
+                    BTRFS_SUPER_INFO_SIZE);
        if (NULL == bh)
                return -1;
        super_tmp = (struct btrfs_super_block *)
@@ -808,7 +814,10 @@ static int btrfsic_process_superblock_dev_mirror(
        if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
            strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
                    sizeof(super_tmp->magic)) ||
-           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
+           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
+           btrfs_super_nodesize(super_tmp) != state->metablock_size ||
+           btrfs_super_leafsize(super_tmp) != state->metablock_size ||
+           btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
                brelse(bh);
                return 0;
        }
@@ -835,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror(
                superblock_tmp->never_written = 0;
                superblock_tmp->mirror_num = 1 + superblock_mirror_num;
                if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-                       printk(KERN_INFO "New initial S-block (bdev %p, %s)"
-                              " @%llu (%s/%llu/%d)\n",
-                              superblock_bdev, device->name,
-                              (unsigned long long)dev_bytenr,
-                              dev_state->name,
-                              (unsigned long long)dev_bytenr,
-                              superblock_mirror_num);
+                       printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)"
+                                    " @%llu (%s/%llu/%d)\n",
+                                    superblock_bdev,
+                                    rcu_str_deref(device->name),
+                                    (unsigned long long)dev_bytenr,
+                                    dev_state->name,
+                                    (unsigned long long)dev_bytenr,
+                                    superblock_mirror_num);
                list_add(&superblock_tmp->all_blocks_node,
                         &state->all_blocks_list);
                btrfsic_block_hashtable_add(superblock_tmp,
@@ -893,7 +903,7 @@ static int btrfsic_process_superblock_dev_mirror(
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -902,7 +912,8 @@ static int btrfsic_process_superblock_dev_mirror(
                        struct btrfsic_block_data_ctx tmp_next_block_ctx;
                        struct btrfsic_block_link *l;
 
-                       if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       if (btrfsic_map_block(state, next_bytenr,
+                                             state->metablock_size,
                                              &tmp_next_block_ctx,
                                              mirror_num)) {
                                printk(KERN_INFO "btrfsic: btrfsic_map_block("
@@ -966,13 +977,15 @@ static int btrfsic_process_metablock(
                struct btrfsic_state *state,
                struct btrfsic_block *const first_block,
                struct btrfsic_block_data_ctx *const first_block_ctx,
-               struct btrfs_header *const first_hdr,
                int first_limit_nesting, int force_iodone_flag)
 {
        struct btrfsic_stack_frame initial_stack_frame = { 0 };
        struct btrfsic_stack_frame *sf;
        struct btrfsic_stack_frame *next_stack;
+       struct btrfs_header *const first_hdr =
+               (struct btrfs_header *)first_block_ctx->datav[0];
 
+       BUG_ON(!first_hdr);
        sf = &initial_stack_frame;
        sf->error = 0;
        sf->i = -1;
@@ -1012,21 +1025,47 @@ continue_with_current_leaf_stack_frame:
                }
 
                if (sf->i < sf->nr) {
-                       struct btrfs_item *disk_item = leafhdr->items + sf->i;
-                       struct btrfs_disk_key *disk_key = &disk_item->key;
+                       struct btrfs_item disk_item;
+                       u32 disk_item_offset =
+                               (uintptr_t)(leafhdr->items + sf->i) -
+                               (uintptr_t)leafhdr;
+                       struct btrfs_disk_key *disk_key;
                        u8 type;
-                       const u32 item_offset = le32_to_cpu(disk_item->offset);
+                       u32 item_offset;
 
+                       if (disk_item_offset + sizeof(struct btrfs_item) >
+                           sf->block_ctx->len) {
+leaf_item_out_of_bounce_error:
+                               printk(KERN_INFO
+                                      "btrfsic: leaf item out of bounds at logical %llu, dev %s\n",
+                                      sf->block_ctx->start,
+                                      sf->block_ctx->dev->name);
+                               goto one_stack_frame_backwards;
+                       }
+                       btrfsic_read_from_block_data(sf->block_ctx,
+                                                    &disk_item,
+                                                    disk_item_offset,
+                                                    sizeof(struct btrfs_item));
+                       item_offset = le32_to_cpu(disk_item.offset);
+                       disk_key = &disk_item.key;
                        type = disk_key->type;
 
                        if (BTRFS_ROOT_ITEM_KEY == type) {
-                               const struct btrfs_root_item *const root_item =
-                                   (struct btrfs_root_item *)
-                                   (sf->block_ctx->data +
-                                    offsetof(struct btrfs_leaf, items) +
-                                    item_offset);
-                               const u64 next_bytenr =
-                                   le64_to_cpu(root_item->bytenr);
+                               struct btrfs_root_item root_item;
+                               u32 root_item_offset;
+                               u64 next_bytenr;
+
+                               root_item_offset = item_offset +
+                                       offsetof(struct btrfs_leaf, items);
+                               if (root_item_offset +
+                                   sizeof(struct btrfs_root_item) >
+                                   sf->block_ctx->len)
+                                       goto leaf_item_out_of_bounds_error;
+                               btrfsic_read_from_block_data(
+                                       sf->block_ctx, &root_item,
+                                       root_item_offset,
+                                       sizeof(struct btrfs_root_item));
+                               next_bytenr = le64_to_cpu(root_item.bytenr);
 
                                sf->error =
                                    btrfsic_create_link_to_next_block(
@@ -1041,7 +1080,7 @@ continue_with_current_leaf_stack_frame:
                                                &sf->num_copies,
                                                &sf->mirror_num,
                                                disk_key,
-                                               le64_to_cpu(root_item->
+                                               le64_to_cpu(root_item.
                                                generation));
                                if (sf->error)
                                        goto one_stack_frame_backwards;
@@ -1049,7 +1088,7 @@ continue_with_current_leaf_stack_frame:
                                if (NULL != sf->next_block) {
                                        struct btrfs_header *const next_hdr =
                                            (struct btrfs_header *)
-                                           sf->next_block_ctx.data;
+                                           sf->next_block_ctx.datav[0];
 
                                        next_stack =
                                            btrfsic_stack_frame_alloc();
@@ -1111,10 +1150,24 @@ continue_with_current_node_stack_frame:
                }
 
                if (sf->i < sf->nr) {
-                       struct btrfs_key_ptr *disk_key_ptr =
-                           nodehdr->ptrs + sf->i;
-                       const u64 next_bytenr =
-                           le64_to_cpu(disk_key_ptr->blockptr);
+                       struct btrfs_key_ptr key_ptr;
+                       u32 key_ptr_offset;
+                       u64 next_bytenr;
+
+                       key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
+                                         (uintptr_t)nodehdr;
+                       if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
+                           sf->block_ctx->len) {
+                               printk(KERN_INFO
+                                      "btrfsic: node item out of bounds at logical %llu, dev %s\n",
+                                      sf->block_ctx->start,
+                                      sf->block_ctx->dev->name);
+                               goto one_stack_frame_backwards;
+                       }
+                       btrfsic_read_from_block_data(
+                               sf->block_ctx, &key_ptr, key_ptr_offset,
+                               sizeof(struct btrfs_key_ptr));
+                       next_bytenr = le64_to_cpu(key_ptr.blockptr);
 
                        sf->error = btrfsic_create_link_to_next_block(
                                        state,
@@ -1127,15 +1180,15 @@ continue_with_current_node_stack_frame:
                                        force_iodone_flag,
                                        &sf->num_copies,
                                        &sf->mirror_num,
-                                       &disk_key_ptr->key,
-                                       le64_to_cpu(disk_key_ptr->generation));
+                                       &key_ptr.key,
+                                       le64_to_cpu(key_ptr.generation));
                        if (sf->error)
                                goto one_stack_frame_backwards;
 
                        if (NULL != sf->next_block) {
                                struct btrfs_header *const next_hdr =
                                    (struct btrfs_header *)
-                                   sf->next_block_ctx.data;
+                                   sf->next_block_ctx.datav[0];
 
                                next_stack = btrfsic_stack_frame_alloc();
                                if (NULL == next_stack)
@@ -1181,6 +1234,35 @@ one_stack_frame_backwards:
        return sf->error;
 }
 
+static void btrfsic_read_from_block_data(
+       struct btrfsic_block_data_ctx *block_ctx,
+       void *dstv, u32 offset, size_t len)
+{
+       size_t cur;
+       size_t offset_in_page;
+       char *kaddr;
+       char *dst = (char *)dstv;
+       size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
+       unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+
+       WARN_ON(offset + len > block_ctx->len);
+       offset_in_page = (start_offset + offset) &
+                        ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+       while (len > 0) {
+               cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
+               BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
+                           PAGE_CACHE_SHIFT);
+               kaddr = block_ctx->datav[i];
+               memcpy(dst, kaddr + offset_in_page, cur);
+
+               dst += cur;
+               len -= cur;
+               offset_in_page = 0;
+               i++;
+       }
+}
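
Since a metadata block can now span several separately kmapped pages, items are copied out of the page vector with btrfsic_read_from_block_data() instead of being dereferenced in place, and every copy is preceded by a bounds check against block_ctx->len. Below is a rough userspace sketch of that checked, page-crossing copy; read_from_pagev, SKETCH_PAGE_SIZE and the plain char-pointer array are illustrative stand-ins, not kernel API.

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u

/*
 * Copy 'len' bytes starting at byte 'offset' of a block whose backing
 * store is an array of page-sized buffers, refusing to read past the
 * end of the block and crossing page boundaries as needed.
 */
static int read_from_pagev(char **pagev, uint32_t block_len,
                           uint32_t offset, void *dstv, size_t len)
{
        char *dst = dstv;
        size_t page = offset / SKETCH_PAGE_SIZE;
        size_t in_page = offset % SKETCH_PAGE_SIZE;

        if ((uint64_t)offset + len > block_len)
                return -1;              /* item out of bounds */

        while (len > 0) {
                size_t cur = SKETCH_PAGE_SIZE - in_page;

                if (cur > len)
                        cur = len;
                memcpy(dst, pagev[page] + in_page, cur);
                dst += cur;
                len -= cur;
                in_page = 0;            /* later pages are read from byte 0 */
                page++;
        }
        return 0;
}

The kernel helper above follows the same structure: clamp the copy length to what remains of the current page, then restart at offset zero on the next one.
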
+
 static int btrfsic_create_link_to_next_block(
                struct btrfsic_state *state,
                struct btrfsic_block *block,
@@ -1204,7 +1286,7 @@ static int btrfsic_create_link_to_next_block(
        if (0 == *num_copiesp) {
                *num_copiesp =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, *num_copiesp);
@@ -1219,7 +1301,7 @@ static int btrfsic_create_link_to_next_block(
                       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
                       *mirror_nump);
        ret = btrfsic_map_block(state, next_bytenr,
-                               BTRFSIC_BLOCK_SIZE,
+                               state->metablock_size,
                                next_block_ctx, *mirror_nump);
        if (ret) {
                printk(KERN_INFO
@@ -1314,7 +1396,7 @@ static int btrfsic_create_link_to_next_block(
 
        if (limit_nesting > 0 && did_alloc_block_link) {
                ret = btrfsic_read_block(state, next_block_ctx);
-               if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+               if (ret < (int)next_block_ctx->len) {
                        printk(KERN_INFO
                               "btrfsic: read block @logical %llu failed!\n",
                               (unsigned long long)next_bytenr);
@@ -1339,43 +1421,74 @@ static int btrfsic_handle_extent_data(
                u32 item_offset, int force_iodone_flag)
 {
        int ret;
-       struct btrfs_file_extent_item *file_extent_item =
-           (struct btrfs_file_extent_item *)(block_ctx->data +
-                                             offsetof(struct btrfs_leaf,
-                                                      items) + item_offset);
-       u64 next_bytenr =
-           le64_to_cpu(file_extent_item->disk_bytenr) +
-           le64_to_cpu(file_extent_item->offset);
-       u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
-       u64 generation = le64_to_cpu(file_extent_item->generation);
+       struct btrfs_file_extent_item file_extent_item;
+       u64 file_extent_item_offset;
+       u64 next_bytenr;
+       u64 num_bytes;
+       u64 generation;
        struct btrfsic_block_link *l;
 
+       file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
+                                 item_offset;
+       if (file_extent_item_offset +
+           offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
+           block_ctx->len) {
+               printk(KERN_INFO
+                      "btrfsic: file item out of bounds at logical %llu, dev %s\n",
+                      block_ctx->start, block_ctx->dev->name);
+               return -1;
+       }
+
+       btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+               file_extent_item_offset,
+               offsetof(struct btrfs_file_extent_item, disk_num_bytes));
+       if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
+           ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr)) {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+                       printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+                              file_extent_item.type,
+                              (unsigned long long)
+                              le64_to_cpu(file_extent_item.disk_bytenr));
+               return 0;
+       }
+
+       if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
+           block_ctx->len) {
+               printk(KERN_INFO
+                      "btrfsic: file item out of bounds at logical %llu, dev %s\n",
+                      block_ctx->start, block_ctx->dev->name);
+               return -1;
+       }
+       btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+                                    file_extent_item_offset,
+                                    sizeof(struct btrfs_file_extent_item));
+       next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
+                     le64_to_cpu(file_extent_item.offset);
+       num_bytes = le64_to_cpu(file_extent_item.num_bytes);
+       generation = le64_to_cpu(file_extent_item.generation);
+
        if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
                printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
                       " offset = %llu, num_bytes = %llu\n",
-                      file_extent_item->type,
-                      (unsigned long long)
-                      le64_to_cpu(file_extent_item->disk_bytenr),
+                      file_extent_item.type,
                       (unsigned long long)
-                      le64_to_cpu(file_extent_item->offset),
-                      (unsigned long long)
-                      le64_to_cpu(file_extent_item->num_bytes));
-       if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
-           ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
-               return 0;
+                      le64_to_cpu(file_extent_item.disk_bytenr),
+                      (unsigned long long)le64_to_cpu(file_extent_item.offset),
+                      (unsigned long long)num_bytes);
        while (num_bytes > 0) {
                u32 chunk_len;
                int num_copies;
                int mirror_num;
 
-               if (num_bytes > BTRFSIC_BLOCK_SIZE)
-                       chunk_len = BTRFSIC_BLOCK_SIZE;
+               if (num_bytes > state->datablock_size)
+                       chunk_len = state->datablock_size;
                else
                        chunk_len = num_bytes;
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->datablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -1475,8 +1588,9 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
        block_ctx_out->dev_bytenr = multi->stripes[0].physical;
        block_ctx_out->start = bytenr;
        block_ctx_out->len = len;
-       block_ctx_out->data = NULL;
-       block_ctx_out->bh = NULL;
+       block_ctx_out->datav = NULL;
+       block_ctx_out->pagev = NULL;
+       block_ctx_out->mem_to_free = NULL;
 
        if (0 == ret)
                kfree(multi);
@@ -1496,8 +1610,9 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
        block_ctx_out->dev_bytenr = bytenr;
        block_ctx_out->start = bytenr;
        block_ctx_out->len = len;
-       block_ctx_out->data = NULL;
-       block_ctx_out->bh = NULL;
+       block_ctx_out->datav = NULL;
+       block_ctx_out->pagev = NULL;
+       block_ctx_out->mem_to_free = NULL;
        if (NULL != block_ctx_out->dev) {
                return 0;
        } else {
@@ -1508,38 +1623,127 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
 
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 {
-       if (NULL != block_ctx->bh) {
-               brelse(block_ctx->bh);
-               block_ctx->bh = NULL;
+       if (block_ctx->mem_to_free) {
+               unsigned int num_pages;
+
+               BUG_ON(!block_ctx->datav);
+               BUG_ON(!block_ctx->pagev);
+               num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+                           PAGE_CACHE_SHIFT;
+               while (num_pages > 0) {
+                       num_pages--;
+                       if (block_ctx->datav[num_pages]) {
+                               kunmap(block_ctx->pagev[num_pages]);
+                               block_ctx->datav[num_pages] = NULL;
+                       }
+                       if (block_ctx->pagev[num_pages]) {
+                               __free_page(block_ctx->pagev[num_pages]);
+                               block_ctx->pagev[num_pages] = NULL;
+                       }
+               }
+
+               kfree(block_ctx->mem_to_free);
+               block_ctx->mem_to_free = NULL;
+               block_ctx->pagev = NULL;
+               block_ctx->datav = NULL;
        }
 }
 
 static int btrfsic_read_block(struct btrfsic_state *state,
                              struct btrfsic_block_data_ctx *block_ctx)
 {
-       block_ctx->bh = NULL;
-       if (block_ctx->dev_bytenr & 4095) {
+       unsigned int num_pages;
+       unsigned int i;
+       u64 dev_bytenr;
+       int ret;
+
+       BUG_ON(block_ctx->datav);
+       BUG_ON(block_ctx->pagev);
+       BUG_ON(block_ctx->mem_to_free);
+       if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
                printk(KERN_INFO
                       "btrfsic: read_block() with unaligned bytenr %llu\n",
                       (unsigned long long)block_ctx->dev_bytenr);
                return -1;
        }
-       if (block_ctx->len > 4096) {
-               printk(KERN_INFO
-                      "btrfsic: read_block() with too huge size %d\n",
-                      block_ctx->len);
+
+       num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+                   PAGE_CACHE_SHIFT;
+       block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
+                                         sizeof(*block_ctx->pagev)) *
+                                        num_pages, GFP_NOFS);
+       if (!block_ctx->mem_to_free)
                return -1;
+       block_ctx->datav = block_ctx->mem_to_free;
+       block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
+       for (i = 0; i < num_pages; i++) {
+               block_ctx->pagev[i] = alloc_page(GFP_NOFS);
+               if (!block_ctx->pagev[i])
+                       return -1;
        }
 
-       block_ctx->bh = __bread(block_ctx->dev->bdev,
-                               block_ctx->dev_bytenr >> 12, 4096);
-       if (NULL == block_ctx->bh)
-               return -1;
-       block_ctx->data = block_ctx->bh->b_data;
+       dev_bytenr = block_ctx->dev_bytenr;
+       for (i = 0; i < num_pages;) {
+               struct bio *bio;
+               unsigned int j;
+               DECLARE_COMPLETION_ONSTACK(complete);
+
+               bio = bio_alloc(GFP_NOFS, num_pages - i);
+               if (!bio) {
+                       printk(KERN_INFO
+                              "btrfsic: bio_alloc() for %u pages failed!\n",
+                              num_pages - i);
+                       return -1;
+               }
+               bio->bi_bdev = block_ctx->dev->bdev;
+               bio->bi_sector = dev_bytenr >> 9;
+               bio->bi_end_io = btrfsic_complete_bio_end_io;
+               bio->bi_private = &complete;
+
+               for (j = i; j < num_pages; j++) {
+                       ret = bio_add_page(bio, block_ctx->pagev[j],
+                                          PAGE_CACHE_SIZE, 0);
+                       if (PAGE_CACHE_SIZE != ret)
+                               break;
+               }
+               if (j == i) {
+                       printk(KERN_INFO
+                              "btrfsic: error, failed to add a single page!\n");
+                       bio_put(bio);
+                       return -1;
+               }
+               submit_bio(READ, bio);
+
+               /* this will also unplug the queue */
+               wait_for_completion(&complete);
+
+               if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+                       printk(KERN_INFO
+                              "btrfsic: read error at logical %llu dev %s!\n",
+                              block_ctx->start, block_ctx->dev->name);
+                       bio_put(bio);
+                       return -1;
+               }
+               bio_put(bio);
+               dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+               i = j;
+       }
+       for (i = 0; i < num_pages; i++) {
+               block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
+               if (!block_ctx->datav[i]) {
+                       printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+                              block_ctx->dev->name);
+                       return -1;
+               }
+       }
 
        return block_ctx->len;
 }
 
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
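
btrfsic_read_block() no longer goes through __bread(): it allocates its own pages, builds a bio over them, submits it and sleeps on an on-stack completion that btrfsic_complete_bio_end_io() fires. The following is a loose userspace analogue of that submit-then-wait pattern, with a pthread standing in for the asynchronous block layer; struct completion, fake_io and the other names here are invented for illustration.

/* build: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

/* stands in for the block layer finishing the I/O and calling bi_end_io */
static void *fake_io(void *arg)
{
        complete(arg);
        return NULL;
}

int main(void)
{
        struct completion c;
        pthread_t t;

        pthread_mutex_init(&c.lock, NULL);
        pthread_cond_init(&c.cond, NULL);
        c.done = 0;

        pthread_create(&t, NULL, fake_io, &c);  /* "submit_bio(READ, bio)" */
        wait_for_completion(&c);                /* sleep until end_io fires */
        pthread_join(&t, NULL);
        puts("read completed");
        return 0;
}
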
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
        struct list_head *elem_all;
@@ -1617,32 +1821,39 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
  * (note that this test fails for the super block)
  */
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-                                    const u8 *data, unsigned int size)
+                                    char **datav, unsigned int num_pages)
 {
        struct btrfs_header *h;
        u8 csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
-       int fail = 0;
-       int crc_fail = 0;
+       unsigned int i;
 
-       h = (struct btrfs_header *)data;
+       if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+               return 1; /* not metadata */
+       num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+       h = (struct btrfs_header *)datav[0];
 
        if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
-               fail++;
+               return 1;
+
+       for (i = 0; i < num_pages; i++) {
+               u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
+               size_t sublen = i ? PAGE_CACHE_SIZE :
+                                   (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
 
-       crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
+               crc = crc32c(crc, data, sublen);
+       }
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, h->csum, state->csum_size))
-               crc_fail++;
+               return 1;
 
-       return fail || crc_fail;
+       return 0; /* is metadata */
 }
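
The metadata test now folds all pages of the block into a single checksum, skipping the on-disk checksum field at the start of the first page because it cannot cover itself. Here is a self-contained sketch of that loop, using zlib's crc32 purely as a stand-in for the kernel's crc32c (different polynomial and initial value); the per-page walk and the skip of the leading checksum bytes are the point.

#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_CSUM_SIZE 32u            /* BTRFS_CSUM_SIZE */

/*
 * Checksum a metadata block that is split across page-sized buffers,
 * skipping the leading checksum field itself.
 */
static uint32_t csum_block(char **datav, unsigned int num_pages)
{
        uint32_t crc = crc32(0L, Z_NULL, 0);
        unsigned int i;

        for (i = 0; i < num_pages; i++) {
                const unsigned char *data = (const unsigned char *)datav[i];
                size_t len = SKETCH_PAGE_SIZE;

                if (i == 0) {           /* first page: skip the csum field */
                        data += SKETCH_CSUM_SIZE;
                        len -= SKETCH_CSUM_SIZE;
                }
                crc = crc32(crc, data, len);
        }
        return crc;
}
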
 
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-                                         u64 dev_bytenr,
-                                         u8 *mapped_data, unsigned int len,
-                                         struct bio *bio,
-                                         int *bio_is_patched,
+                                         u64 dev_bytenr, char **mapped_datav,
+                                         unsigned int num_pages,
+                                         struct bio *bio, int *bio_is_patched,
                                          struct buffer_head *bh,
                                          int submit_bio_bh_rw)
 {
@@ -1652,12 +1863,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
        int ret;
        struct btrfsic_state *state = dev_state->state;
        struct block_device *bdev = dev_state->bdev;
+       unsigned int processed_len;
 
-       WARN_ON(len > PAGE_SIZE);
-       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
        if (NULL != bio_is_patched)
                *bio_is_patched = 0;
 
+again:
+       if (num_pages == 0)
+               return;
+
+       processed_len = 0;
+       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
+                                                     num_pages));
+
        block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
                                               &state->block_hashtable);
        if (NULL != block) {
@@ -1667,8 +1885,16 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 
                if (block->is_superblock) {
                        bytenr = le64_to_cpu(((struct btrfs_super_block *)
-                                             mapped_data)->bytenr);
+                                             mapped_datav[0])->bytenr);
+                       if (num_pages * PAGE_CACHE_SIZE <
+                           BTRFS_SUPER_INFO_SIZE) {
+                               printk(KERN_INFO
+                                      "btrfsic: cannot work with too short bios!\n");
+                               return;
+                       }
                        is_metadata = 1;
+                       BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+                       processed_len = BTRFS_SUPER_INFO_SIZE;
                        if (state->print_mask &
                            BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
                                printk(KERN_INFO
@@ -1678,12 +1904,18 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
                if (is_metadata) {
                        if (!block->is_superblock) {
+                               if (num_pages * PAGE_CACHE_SIZE <
+                                   state->metablock_size) {
+                                       printk(KERN_INFO
+                                              "btrfsic: cannot work with too short bios!\n");
+                                       return;
+                               }
+                               processed_len = state->metablock_size;
                                bytenr = le64_to_cpu(((struct btrfs_header *)
-                                                     mapped_data)->bytenr);
+                                                     mapped_datav[0])->bytenr);
                                btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
                                                               dev_state,
-                                                              dev_bytenr,
-                                                              mapped_data);
+                                                              dev_bytenr);
                        }
                        if (block->logical_bytenr != bytenr) {
                                printk(KERN_INFO
@@ -1710,6 +1942,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                       block->mirror_num,
                                       btrfsic_get_block_type(state, block));
                } else {
+                       if (num_pages * PAGE_CACHE_SIZE <
+                           state->datablock_size) {
+                               printk(KERN_INFO
+                                      "btrfsic: cannot work with too short bios!\n");
+                               return;
+                       }
+                       processed_len = state->datablock_size;
                        bytenr = block->logical_bytenr;
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO
@@ -1747,7 +1986,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                               le64_to_cpu(block->disk_key.offset),
                               (unsigned long long)
                               le64_to_cpu(((struct btrfs_header *)
-                                           mapped_data)->generation),
+                                           mapped_datav[0])->generation),
                               (unsigned long long)
                               state->max_superblock_generation);
                        btrfsic_dump_tree(state);
@@ -1765,10 +2004,10 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                               (unsigned long long)block->generation,
                               (unsigned long long)
                               le64_to_cpu(((struct btrfs_header *)
-                                           mapped_data)->generation));
+                                           mapped_datav[0])->generation));
                        /* it would not be safe to go on */
                        btrfsic_dump_tree(state);
-                       return;
+                       goto continue_loop;
                }
 
                /*
@@ -1796,18 +2035,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
 
                if (block->is_superblock)
-                       ret = btrfsic_map_superblock(state, bytenr, len,
+                       ret = btrfsic_map_superblock(state, bytenr,
+                                                    processed_len,
                                                     bdev, &block_ctx);
                else
-                       ret = btrfsic_map_block(state, bytenr, len,
+                       ret = btrfsic_map_block(state, bytenr, processed_len,
                                                &block_ctx, 0);
                if (ret) {
                        printk(KERN_INFO
                               "btrfsic: btrfsic_map_block(root @%llu)"
                               " failed!\n", (unsigned long long)bytenr);
-                       return;
+                       goto continue_loop;
                }
-               block_ctx.data = mapped_data;
+               block_ctx.datav = mapped_datav;
                /* the following is required in case of writes to mirrors,
                 * use the same that was used for the lookup */
                block_ctx.dev = dev_state;
@@ -1863,11 +2103,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                        block->logical_bytenr = bytenr;
                        block->is_metadata = 1;
                        if (block->is_superblock) {
+                               BUG_ON(PAGE_CACHE_SIZE !=
+                                      BTRFS_SUPER_INFO_SIZE);
                                ret = btrfsic_process_written_superblock(
                                                state,
                                                block,
                                                (struct btrfs_super_block *)
-                                               mapped_data);
+                                               mapped_datav[0]);
                                if (state->print_mask &
                                    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
                                        printk(KERN_INFO
@@ -1880,8 +2122,6 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                                state,
                                                block,
                                                &block_ctx,
-                                               (struct btrfs_header *)
-                                               block_ctx.data,
                                                0, 0);
                        }
                        if (ret)
@@ -1912,26 +2152,30 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                u64 bytenr;
 
                if (!is_metadata) {
+                       processed_len = state->datablock_size;
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO "Written block (%s/%llu/?)"
                                       " !found in hash table, D.\n",
                                       dev_state->name,
                                       (unsigned long long)dev_bytenr);
-                       if (!state->include_extent_data)
-                               return; /* ignore that written D block */
+                       if (!state->include_extent_data) {
+                               /* ignore that written D block */
+                               goto continue_loop;
+                       }
 
                        /* this is getting ugly for the
                         * include_extent_data case... */
                        bytenr = 0;     /* unknown */
                        block_ctx.start = bytenr;
-                       block_ctx.len = len;
-                       block_ctx.bh = NULL;
+                       block_ctx.len = processed_len;
+                       block_ctx.mem_to_free = NULL;
+                       block_ctx.pagev = NULL;
                } else {
+                       processed_len = state->metablock_size;
                        bytenr = le64_to_cpu(((struct btrfs_header *)
-                                             mapped_data)->bytenr);
+                                             mapped_datav[0])->bytenr);
                        btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
-                                                      dev_bytenr,
-                                                      mapped_data);
+                                                      dev_bytenr);
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO
                                       "Written block @%llu (%s/%llu/?)"
@@ -1940,17 +2184,17 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                       dev_state->name,
                                       (unsigned long long)dev_bytenr);
 
-                       ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
-                                               0);
+                       ret = btrfsic_map_block(state, bytenr, processed_len,
+                                               &block_ctx, 0);
                        if (ret) {
                                printk(KERN_INFO
                                       "btrfsic: btrfsic_map_block(root @%llu)"
                                       " failed!\n",
                                       (unsigned long long)dev_bytenr);
-                               return;
+                               goto continue_loop;
                        }
                }
-               block_ctx.data = mapped_data;
+               block_ctx.datav = mapped_datav;
                /* the following is required in case of writes to mirrors,
                 * use the same that was used for the lookup */
                block_ctx.dev = dev_state;
@@ -1960,7 +2204,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                if (NULL == block) {
                        printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
                        btrfsic_release_block_ctx(&block_ctx);
-                       return;
+                       goto continue_loop;
                }
                block->dev_state = dev_state;
                block->dev_bytenr = dev_bytenr;
@@ -2020,9 +2264,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 
                if (is_metadata) {
                        ret = btrfsic_process_metablock(state, block,
-                                                       &block_ctx,
-                                                       (struct btrfs_header *)
-                                                       block_ctx.data, 0, 0);
+                                                       &block_ctx, 0, 0);
                        if (ret)
                                printk(KERN_INFO
                                       "btrfsic: process_metablock(root @%llu)"
@@ -2031,6 +2273,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
                btrfsic_release_block_ctx(&block_ctx);
        }
+
+continue_loop:
+       BUG_ON(!processed_len);
+       dev_bytenr += processed_len;
+       mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
+       num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+       goto again;
 }
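
Because a single submitted bio can now carry several logical blocks, btrfsic_process_written_block() consumes the mapped pages chunk by chunk: each pass classifies the block at the current position, sets processed_len accordingly, and the continue_loop/again pair advances dev_bytenr and the page vector. A stripped-down sketch of that walk follows; classify_len() and the block sizes are invented for illustration only.

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

/* stand-in for the superblock/metadata/data classification */
static unsigned int classify_len(unsigned long long dev_bytenr)
{
        /* pretend blocks on a 16 KiB boundary are metadata-sized */
        return (dev_bytenr % (4 * SKETCH_PAGE_SIZE)) ? SKETCH_PAGE_SIZE
                                                     : 4 * SKETCH_PAGE_SIZE;
}

static void process_written_pages(unsigned long long dev_bytenr,
                                  unsigned int num_pages)
{
        while (num_pages > 0) {
                unsigned int processed_len = classify_len(dev_bytenr);
                unsigned int processed_pages =
                        processed_len / SKETCH_PAGE_SIZE;

                if (processed_pages > num_pages)
                        return;         /* bio too short for this block */
                printf("block at %llu, %u bytes\n",
                       dev_bytenr, processed_len);
                dev_bytenr += processed_len;
                num_pages -= processed_pages;
        }
}
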
 
 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
@@ -2213,7 +2462,7 @@ static int btrfsic_process_written_superblock(
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, BTRFS_SUPER_INFO_SIZE);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -2224,7 +2473,8 @@ static int btrfsic_process_written_superblock(
                                printk(KERN_INFO
                                       "btrfsic_process_written_superblock("
                                       "mirror_num=%d)\n", mirror_num);
-                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               BTRFS_SUPER_INFO_SIZE,
                                                &tmp_next_block_ctx,
                                                mirror_num);
                        if (ret) {
@@ -2689,7 +2939,7 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                                           u64 bytenr,
                                           struct btrfsic_dev_state *dev_state,
-                                          u64 dev_bytenr, char *data)
+                                          u64 dev_bytenr)
 {
        int num_copies;
        int mirror_num;
@@ -2698,10 +2948,10 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
        int match = 0;
 
        num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                     bytenr, PAGE_SIZE);
+                                     bytenr, state->metablock_size);
 
        for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-               ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+               ret = btrfsic_map_block(state, bytenr, state->metablock_size,
                                        &block_ctx, mirror_num);
                if (ret) {
                        printk(KERN_INFO "btrfsic:"
@@ -2727,7 +2977,8 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                       (unsigned long long)bytenr, dev_state->name,
                       (unsigned long long)dev_bytenr);
                for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-                       ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, bytenr,
+                                               state->metablock_size,
                                                &block_ctx, mirror_num);
                        if (ret)
                                continue;
@@ -2781,13 +3032,13 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
                               (unsigned long)bh->b_size, bh->b_data,
                               bh->b_bdev);
                btrfsic_process_written_block(dev_state, dev_bytenr,
-                                             bh->b_data, bh->b_size, NULL,
+                                             &bh->b_data, 1, NULL,
                                              NULL, bh, rw);
        } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
+                              "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
                               rw, bh->b_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
@@ -2836,6 +3087,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                unsigned int i;
                u64 dev_bytenr;
                int bio_is_patched;
+               char **mapped_datav;
 
                dev_bytenr = 512 * bio->bi_sector;
                bio_is_patched = 0;
@@ -2848,35 +3100,46 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                               (unsigned long long)dev_bytenr,
                               bio->bi_bdev);
 
+               mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
+                                      GFP_NOFS);
+               if (!mapped_datav)
+                       goto leave;
                for (i = 0; i < bio->bi_vcnt; i++) {
-                       u8 *mapped_data;
-
-                       mapped_data = kmap(bio->bi_io_vec[i].bv_page);
+                       BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+                       mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
+                       if (!mapped_datav[i]) {
+                               while (i > 0) {
+                                       i--;
+                                       kunmap(bio->bi_io_vec[i].bv_page);
+                               }
+                               kfree(mapped_datav);
+                               goto leave;
+                       }
                        if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
                             BTRFSIC_PRINT_MASK_VERBOSE) ==
                            (dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
                              BTRFSIC_PRINT_MASK_VERBOSE)))
                                printk(KERN_INFO
-                                      "#%u: page=%p, mapped=%p, len=%u,"
-                                      " offset=%u\n",
+                                      "#%u: page=%p, len=%u, offset=%u\n",
                                       i, bio->bi_io_vec[i].bv_page,
-                                      mapped_data,
                                       bio->bi_io_vec[i].bv_len,
                                       bio->bi_io_vec[i].bv_offset);
-                       btrfsic_process_written_block(dev_state, dev_bytenr,
-                                                     mapped_data,
-                                                     bio->bi_io_vec[i].bv_len,
-                                                     bio, &bio_is_patched,
-                                                     NULL, rw);
+               }
+               btrfsic_process_written_block(dev_state, dev_bytenr,
+                                             mapped_datav, bio->bi_vcnt,
+                                             bio, &bio_is_patched,
+                                             NULL, rw);
+               while (i > 0) {
+                       i--;
                        kunmap(bio->bi_io_vec[i].bv_page);
-                       dev_bytenr += bio->bi_io_vec[i].bv_len;
                }
+               kfree(mapped_datav);
        } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
+                              "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
                               rw, bio->bi_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
@@ -2903,6 +3166,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                        bio->bi_end_io = btrfsic_bio_end_io;
                }
        }
+leave:
        mutex_unlock(&btrfsic_mutex);
 
        submit_bio(rw, bio);
@@ -2917,6 +3181,30 @@ int btrfsic_mount(struct btrfs_root *root,
        struct list_head *dev_head = &fs_devices->devices;
        struct btrfs_device *device;
 
+       if (root->nodesize != root->leafsize) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
+                      root->nodesize, root->leafsize);
+               return -1;
+       }
+       if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->nodesize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
+       if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->leafsize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
+       if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->sectorsize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
        state = kzalloc(sizeof(*state), GFP_NOFS);
        if (NULL == state) {
                printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
@@ -2933,6 +3221,8 @@ int btrfsic_mount(struct btrfs_root *root,
        state->print_mask = print_mask;
        state->include_extent_data = including_extent_data;
        state->csum_size = 0;
+       state->metablock_size = root->nodesize;
+       state->datablock_size = root->sectorsize;
        INIT_LIST_HEAD(&state->all_blocks_list);
        btrfsic_block_hashtable_init(&state->block_hashtable);
        btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -3049,7 +3339,7 @@ void btrfsic_unmount(struct btrfs_root *root,
                                btrfsic_block_link_free(l);
                }
 
-               if (b_all->is_iodone)
+               if (b_all->is_iodone || b_all->never_written)
                        btrfsic_block_free(b_all);
                else
                        printk(KERN_INFO "btrfs: attempt to free %c-block"
index 4106264fbc655ac79b26efa1177384ea92b72988..8206b3900587efa23570b5b8f7f8071ab6e73156 100644
@@ -18,6 +18,7 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -37,7 +38,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                  struct btrfs_path *path, int level, int slot);
+                   struct btrfs_path *path, int level, int slot,
+                   int tree_mod_log);
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+                                struct extent_buffer *eb);
+struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
+                                         u32 blocksize, u64 parent_transid,
+                                         u64 time_seq);
+struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
+                                               u64 bytenr, u32 blocksize,
+                                               u64 time_seq);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -255,7 +265,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
-                                    buf->start, 0, 1);
+                                    buf->start, 0);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -288,6 +298,449 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+enum mod_log_op {
+       MOD_LOG_KEY_REPLACE,
+       MOD_LOG_KEY_ADD,
+       MOD_LOG_KEY_REMOVE,
+       MOD_LOG_KEY_REMOVE_WHILE_FREEING,
+       MOD_LOG_KEY_REMOVE_WHILE_MOVING,
+       MOD_LOG_MOVE_KEYS,
+       MOD_LOG_ROOT_REPLACE,
+};
+
+struct tree_mod_move {
+       int dst_slot;
+       int nr_items;
+};
+
+struct tree_mod_root {
+       u64 logical;
+       u8 level;
+};
+
+struct tree_mod_elem {
+       struct rb_node node;
+       u64 index;              /* shifted logical */
+       struct seq_list elem;
+       enum mod_log_op op;
+
+       /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
+       int slot;
+
+       /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
+       u64 generation;
+
+       /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
+       struct btrfs_disk_key key;
+       u64 blockptr;
+
+       /* this is used for op == MOD_LOG_MOVE_KEYS */
+       struct tree_mod_move move;
+
+       /* this is used for op == MOD_LOG_ROOT_REPLACE */
+       struct tree_mod_root old_root;
+};
+
+static inline void
+__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+{
+       elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
+       list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+}
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem)
+{
+       elem->flags = 1;
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       __get_tree_mod_seq(fs_info, elem);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+}
+
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem)
+{
+       struct rb_root *tm_root;
+       struct rb_node *node;
+       struct rb_node *next;
+       struct seq_list *cur_elem;
+       struct tree_mod_elem *tm;
+       u64 min_seq = (u64)-1;
+       u64 seq_putting = elem->seq;
+
+       if (!seq_putting)
+               return;
+
+       BUG_ON(!(elem->flags & 1));
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       list_del(&elem->list);
+
+       list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
+               if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+                       if (seq_putting > cur_elem->seq) {
+                               /*
+                                * blocker with lower sequence number exists, we
+                                * cannot remove anything from the log
+                                */
+                               goto out;
+                       }
+                       min_seq = cur_elem->seq;
+               }
+       }
+
+       /*
+        * anything that's lower than the lowest existing (read: blocked)
+        * sequence number can be removed from the tree.
+        */
+       write_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       for (node = rb_first(tm_root); node; node = next) {
+               next = rb_next(node);
+               tm = container_of(node, struct tree_mod_elem, node);
+               if (tm->elem.seq > min_seq)
+                       continue;
+               rb_erase(node, tm_root);
+               list_del(&tm->elem.list);
+               kfree(tm);
+       }
+       write_unlock(&fs_info->tree_mod_log_lock);
+out:
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+}
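
Dropping a sequence blocker prunes the modification log: anything with a sequence at or below the oldest blocker still registered can be removed, everything newer must stay. The sketch below expresses that rule over plain arrays, with no rb-tree or locking; min_active_seq and prune_log are illustrative names.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Return the smallest sequence still held by an active blocker,
 * or UINT64_MAX when no blocker is left. */
static uint64_t min_active_seq(const uint64_t *blockers, size_t n)
{
        uint64_t min_seq = UINT64_MAX;
        size_t i;

        for (i = 0; i < n; i++)
                if (blockers[i] && blockers[i] < min_seq)
                        min_seq = blockers[i];
        return min_seq;
}

/* Report every log element that may be dropped; newer elements must
 * stay because a remaining blocker may still need them. */
static void prune_log(const uint64_t *log_seqs, size_t n, uint64_t min_seq)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (log_seqs[i] <= min_seq)
                        printf("prune log element with seq %llu\n",
                               (unsigned long long)log_seqs[i]);
}
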
+
+/*
+ * key order of the log:
+ *       index -> sequence
+ *
+ * the index is the shifted logical of the *new* root node for root replace
+ * operations, or the shifted logical of the affected block for all other
+ * operations.
+ */
+static noinline int
+__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+{
+       struct rb_root *tm_root;
+       struct rb_node **new;
+       struct rb_node *parent = NULL;
+       struct tree_mod_elem *cur;
+       int ret = 0;
+
+       BUG_ON(!tm || !tm->elem.seq);
+
+       write_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       new = &tm_root->rb_node;
+       while (*new) {
+               cur = container_of(*new, struct tree_mod_elem, node);
+               parent = *new;
+               if (cur->index < tm->index)
+                       new = &((*new)->rb_left);
+               else if (cur->index > tm->index)
+                       new = &((*new)->rb_right);
+               else if (cur->elem.seq < tm->elem.seq)
+                       new = &((*new)->rb_left);
+               else if (cur->elem.seq > tm->elem.seq)
+                       new = &((*new)->rb_right);
+               else {
+                       kfree(tm);
+                       ret = -EEXIST;
+                       goto unlock;
+               }
+       }
+
+       rb_link_node(&tm->node, parent, new);
+       rb_insert_color(&tm->node, tm_root);
+unlock:
+       write_unlock(&fs_info->tree_mod_log_lock);
+       return ret;
+}
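
Log elements are keyed by (index, seq): the shifted logical of the block first, then the modification sequence, so all entries for one block sit next to each other in time order. Expressed as a composite comparator for a generic sorted container (mod_key and mod_key_cmp are hypothetical; the rb-tree insert above additionally reports fully equal keys as -EEXIST):

#include <stdint.h>

struct mod_key {
        uint64_t index;         /* eb->start >> PAGE_SHIFT */
        uint64_t seq;           /* tree modification sequence */
};

/* Order by block index first, then by sequence number. */
static int mod_key_cmp(const struct mod_key *a, const struct mod_key *b)
{
        if (a->index != b->index)
                return a->index < b->index ? -1 : 1;
        if (a->seq != b->seq)
                return a->seq < b->seq ? -1 : 1;
        return 0;               /* collision */
}
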
+
+static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
+                                   struct extent_buffer *eb)
+{
+       smp_mb();
+       if (list_empty(&fs_info->tree_mod_seq_list))
+               return 1;
+       if (!eb)
+               return 0;
+       if (btrfs_header_level(eb) == 0)
+               return 1;
+       return 0;
+}
+
+/*
+ * This allocates memory and gets a tree modification sequence number when
+ * needed.
+ *
+ * Returns 0 when no sequence number is needed, < 0 on error.
+ * Returns the new sequence number (> 0) when one was added. In this case,
+ * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
+ * after inserting into the rb tree.
+ */
+static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
+                                struct tree_mod_elem **tm_ret)
+{
+       struct tree_mod_elem *tm;
+       int seq;
+
+       if (tree_mod_dont_log(fs_info, NULL))
+               return 0;
+
+       tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+       if (!tm)
+               return -ENOMEM;
+
+       tm->elem.flags = 0;
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       if (list_empty(&fs_info->tree_mod_seq_list)) {
+               /*
+                * someone emptied the list while we were waiting for the lock.
+                * we must not add to the list, because no blocker exists. items
+                * are removed from the list only when the existing blocker is
+                * removed from the list.
+                */
+               kfree(tm);
+               seq = 0;
+               spin_unlock(&fs_info->tree_mod_seq_lock);
+       } else {
+               __get_tree_mod_seq(fs_info, &tm->elem);
+               seq = tm->elem.seq;
+       }
+
+       return seq;
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+                            struct extent_buffer *eb, int slot,
+                            enum mod_log_op op, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       if (op != MOD_LOG_KEY_ADD) {
+               btrfs_node_key(eb, &tm->key, slot);
+               tm->blockptr = btrfs_node_blockptr(eb, slot);
+       }
+       tm->op = op;
+       tm->slot = slot;
+       tm->generation = btrfs_node_ptr_generation(eb, slot);
+
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
+}
+
+static noinline int
+tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+                       int slot, enum mod_log_op op)
+{
+       return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
+}
+
+static noinline int
+tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
+                        struct extent_buffer *eb, int dst_slot, int src_slot,
+                        int nr_items, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+       int i;
+
+       if (tree_mod_dont_log(fs_info, eb))
+               return 0;
+
+       for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+               ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+                                             MOD_LOG_KEY_REMOVE_WHILE_MOVING);
+               BUG_ON(ret < 0);
+       }
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->slot = src_slot;
+       tm->move.dst_slot = dst_slot;
+       tm->move.nr_items = nr_items;
+       tm->op = MOD_LOG_MOVE_KEYS;
+
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
+}
+
+static noinline int
+tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
+                        struct extent_buffer *old_root,
+                        struct extent_buffer *new_root, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+       tm->old_root.logical = old_root->start;
+       tm->old_root.level = btrfs_header_level(old_root);
+       tm->generation = btrfs_header_generation(old_root);
+       tm->op = MOD_LOG_ROOT_REPLACE;
+
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
+}
+
+static struct tree_mod_elem *
+__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
+                     int smallest)
+{
+       struct rb_root *tm_root;
+       struct rb_node *node;
+       struct tree_mod_elem *cur = NULL;
+       struct tree_mod_elem *found = NULL;
+       u64 index = start >> PAGE_CACHE_SHIFT;
+
+       read_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       node = tm_root->rb_node;
+       while (node) {
+               cur = container_of(node, struct tree_mod_elem, node);
+               if (cur->index < index) {
+                       node = node->rb_left;
+               } else if (cur->index > index) {
+                       node = node->rb_right;
+               } else if (cur->elem.seq < min_seq) {
+                       node = node->rb_left;
+               } else if (!smallest) {
+                       /* we want the node with the highest seq */
+                       if (found)
+                               BUG_ON(found->elem.seq > cur->elem.seq);
+                       found = cur;
+                       node = node->rb_left;
+               } else if (cur->elem.seq > min_seq) {
+                       /* we want the node with the smallest seq */
+                       if (found)
+                               BUG_ON(found->elem.seq < cur->elem.seq);
+                       found = cur;
+                       node = node->rb_right;
+               } else {
+                       found = cur;
+                       break;
+               }
+       }
+       read_unlock(&fs_info->tree_mod_log_lock);
+
+       return found;
+}
+
+/*
+ * this returns the element from the log with the smallest time sequence
+ * value that's in the log (the oldest log item). any element with a time
+ * sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
+                          u64 min_seq)
+{
+       return __tree_mod_log_search(fs_info, start, min_seq, 1);
+}
+
+/*
+ * this returns the element from the log with the largest time sequence
+ * value that's in the log (the most recent log item). any element with
+ * a time sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
+{
+       return __tree_mod_log_search(fs_info, start, min_seq, 0);
+}
+
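The only difference between the two wrappers above is the smallest flag passed to
__tree_mod_log_search: tree_mod_log_search_oldest keeps the entry with the lowest
seq that is still >= min_seq, tree_mod_log_search keeps the one with the highest.
A rough user-space sketch of that selection over an already sorted array
(illustrative only, none of these names are btrfs code):

    #include <stdio.h>
    #include <stdint.h>

    /* toy log entries for one tree block, sorted by seq ascending */
    static const uint64_t seqs[] = { 3, 7, 11, 42 };
    static const int nseqs = sizeof(seqs) / sizeof(seqs[0]);

    /* smallest seq >= min_seq (mirrors tree_mod_log_search_oldest) */
    static int search_oldest(uint64_t min_seq)
    {
            int i;
            for (i = 0; i < nseqs; i++)
                    if (seqs[i] >= min_seq)
                            return i;
            return -1;
    }

    /* largest seq >= min_seq (mirrors tree_mod_log_search) */
    static int search_newest(uint64_t min_seq)
    {
            int i;
            for (i = nseqs - 1; i >= 0; i--)
                    if (seqs[i] >= min_seq)
                            return i;
            return -1;
    }

    int main(void)
    {
            /* with min_seq == 7, the oldest hit is seq 7, the newest is seq 42 */
            printf("oldest: seq %llu, newest: seq %llu\n",
                   (unsigned long long)seqs[search_oldest(7)],
                   (unsigned long long)seqs[search_newest(7)]);
            return 0;
    }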
+static inline void
+tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+                    struct extent_buffer *src, unsigned long dst_offset,
+                    unsigned long src_offset, int nr_items)
+{
+       int ret;
+       int i;
+
+       if (tree_mod_dont_log(fs_info, NULL))
+               return;
+
+       if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+               return;
+
+       /* speed this up by using a single seq for all operations? */
+       for (i = 0; i < nr_items; i++) {
+               ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
+                                             MOD_LOG_KEY_REMOVE);
+               BUG_ON(ret < 0);
+               ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
+                                             MOD_LOG_KEY_ADD);
+               BUG_ON(ret < 0);
+       }
+}
+
+static inline void
+tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+                    int dst_offset, int src_offset, int nr_items)
+{
+       int ret;
+       ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
+                                      nr_items, GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
+static inline void
+tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
+                         struct extent_buffer *eb,
+                         struct btrfs_disk_key *disk_key, int slot, int atomic)
+{
+       int ret;
+
+       ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
+                                          MOD_LOG_KEY_REPLACE,
+                                          atomic ? GFP_ATOMIC : GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+                                struct extent_buffer *eb)
+{
+       int i;
+       int ret;
+       u32 nritems;
+
+       if (tree_mod_dont_log(fs_info, eb))
+               return;
+
+       nritems = btrfs_header_nritems(eb);
+       for (i = nritems - 1; i >= 0; i--) {
+               ret = tree_mod_log_insert_key(fs_info, eb, i,
+                                             MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+               BUG_ON(ret < 0);
+       }
+}
+
+static inline void
+tree_mod_log_set_root_pointer(struct btrfs_root *root,
+                             struct extent_buffer *new_root_node)
+{
+       int ret;
+       tree_mod_log_free_eb(root->fs_info, root->node);
+       ret = tree_mod_log_insert_root(root->fs_info, root->node,
+                                      new_root_node, GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
 /*
  * check if the tree block can be shared by multiple trees
  */
@@ -409,6 +862,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
+               /*
+                * don't log freeing in case we're freeing the root node, this
+                * is done by tree_mod_log_set_root_pointer later
+                */
+               if (buf != root->node && btrfs_header_level(buf) != 0)
+                       tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -467,7 +926,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
                                     root->root_key.objectid, &disk_key,
-                                    level, search_start, empty_size, 1);
+                                    level, search_start, empty_size);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -506,10 +965,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                        parent_start = 0;
 
                extent_buffer_get(cow);
+               tree_mod_log_set_root_pointer(root, cow);
                rcu_assign_pointer(root->node, cow);
 
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref, 1);
+                                     last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -519,13 +979,15 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                        parent_start = 0;
 
                WARN_ON(trans->transid != btrfs_header_generation(parent));
+               tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+                                       MOD_LOG_KEY_REPLACE);
                btrfs_set_node_blockptr(parent, parent_slot,
                                        cow->start);
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref, 1);
+                                     last_ref);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -535,6 +997,229 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+/*
+ * returns the logical address of the oldest predecessor of the given root.
+ * entries older than time_seq are ignored.
+ */
+static struct tree_mod_elem *
+__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
+                          struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       struct tree_mod_elem *found = NULL;
+       u64 root_logical = root->node->start;
+       int looped = 0;
+
+       if (!time_seq)
+               return 0;
+
+       /*
+        * the very last operation that's logged for a root is the replacement
+        * operation (if it is replaced at all). this has the index of the *new*
+        * root, making it the very first operation that's logged for this root.
+        */
+       while (1) {
+               tm = tree_mod_log_search_oldest(fs_info, root_logical,
+                                               time_seq);
+               if (!looped && !tm)
+                       return 0;
+               /*
+                * if there are no tree operations for the oldest root, we simply
+                * return it. this should only happen if that (old) root is at
+                * level 0.
+                */
+               if (!tm)
+                       break;
+
+               /*
+                * if there's an operation that's not a root replacement, we
+                * found the oldest version of our root. normally, we'll find a
+                * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+                */
+               if (tm->op != MOD_LOG_ROOT_REPLACE)
+                       break;
+
+               found = tm;
+               root_logical = tm->old_root.logical;
+               BUG_ON(root_logical == root->node->start);
+               looped = 1;
+       }
+
+       /* if there's no old root to return, return what we found instead */
+       if (!found)
+               found = tm;
+
+       return found;
+}
+
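The loop above follows root replacements backwards in time: each MOD_LOG_ROOT_REPLACE
entry is keyed by the logical address of the *new* root and stores the old root's
logical in old_root.logical, so repeatedly chasing that field yields ever older
roots. A simplified stand-alone model of that chain walk (it ignores the
"stop at a non-replace entry" case handled above; all names are invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct replace_rec {
            uint64_t new_logical;   /* key: logical of the new root */
            uint64_t old_logical;   /* logical of the root it replaced */
    };

    /* pretend log: root 300 replaced 200, which in turn replaced 100 */
    static const struct replace_rec reclog[] = {
            { 300, 200 },
            { 200, 100 },
    };

    static const struct replace_rec *find_replace(uint64_t logical)
    {
            size_t i;
            for (i = 0; i < sizeof(reclog) / sizeof(reclog[0]); i++)
                    if (reclog[i].new_logical == logical)
                            return &reclog[i];
            return NULL;
    }

    int main(void)
    {
            uint64_t logical = 300; /* current root */
            const struct replace_rec *r;

            while ((r = find_replace(logical)) != NULL)
                    logical = r->old_logical;

            printf("oldest predecessor: %llu\n", (unsigned long long)logical);
            return 0;
    }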
+/*
+ * tm is a pointer to the first operation to rewind within eb. then, all
+ * previous operations will be rewound (until we reach something older than
+ * time_seq).
+ */
+static void
+__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
+                     struct tree_mod_elem *first_tm)
+{
+       u32 n;
+       struct rb_node *next;
+       struct tree_mod_elem *tm = first_tm;
+       unsigned long o_dst;
+       unsigned long o_src;
+       unsigned long p_size = sizeof(struct btrfs_key_ptr);
+
+       n = btrfs_header_nritems(eb);
+       while (tm && tm->elem.seq >= time_seq) {
+               /*
+                * all the operations are recorded with the operator used for
+                * the modification. as we're going backwards, we do the
+                * opposite of each operation here.
+                */
+               switch (tm->op) {
+               case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
+                       BUG_ON(tm->slot < n);
+               case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
+               case MOD_LOG_KEY_REMOVE:
+                       btrfs_set_node_key(eb, &tm->key, tm->slot);
+                       btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+                       btrfs_set_node_ptr_generation(eb, tm->slot,
+                                                     tm->generation);
+                       n++;
+                       break;
+               case MOD_LOG_KEY_REPLACE:
+                       BUG_ON(tm->slot >= n);
+                       btrfs_set_node_key(eb, &tm->key, tm->slot);
+                       btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+                       btrfs_set_node_ptr_generation(eb, tm->slot,
+                                                     tm->generation);
+                       break;
+               case MOD_LOG_KEY_ADD:
+                       /* if a move operation is needed it's in the log */
+                       n--;
+                       break;
+               case MOD_LOG_MOVE_KEYS:
+                       o_dst = btrfs_node_key_ptr_offset(tm->slot);
+                       o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
+                       memmove_extent_buffer(eb, o_dst, o_src,
+                                             tm->move.nr_items * p_size);
+                       break;
+               case MOD_LOG_ROOT_REPLACE:
+                       /*
+                        * this operation is special. for roots, this must be
+                        * handled explicitly before rewinding.
+                        * for non-roots, this operation may exist if the node
+                        * was a root: root A -> child B; then A becomes empty
+                        * and B is promoted to the new root. in the mod log,
+                        * we'll have a root-replace operation for B, a tree
+                        * block that is not a root. we simply ignore that
+                        * operation.
+                        */
+                       break;
+               }
+               next = rb_next(&tm->node);
+               if (!next)
+                       break;
+               tm = container_of(next, struct tree_mod_elem, node);
+               if (tm->index != first_tm->index)
+                       break;
+       }
+       btrfs_set_header_nritems(eb, n);
+}
+
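Every rewind step applies the inverse of the operation that was logged: a logged
add just lowers the item count, a logged remove puts key, block pointer and
generation back, and a logged move is undone by copying the key pointers back from
dst_slot. A move alone is not invertible, which is why tree_mod_log_insert_move
first records MOD_LOG_KEY_REMOVE_WHILE_MOVING entries for the slots the move will
overwrite. A small self-contained model of that pairing on a plain array
(illustrative only; item-count bookkeeping is omitted):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int slots[6]    = { 10, 20, 30, 40, 50, 60 };
            int expected[6] = { 10, 20, 30, 40, 50, 60 };

            /* forward: drop the key in slot 1 by moving slots 2..5 one to the
             * left, i.e. dst_slot = 1, src_slot = 2, nr_items = 4 */
            int dst_slot = 1, src_slot = 2, nr_items = 4;

            /* what tree_mod_log_insert_move records before the move: a
             * REMOVE_WHILE_MOVING entry for the slot that gets overwritten */
            int saved_slot = dst_slot;
            int saved_val = slots[saved_slot];              /* 20 */

            memmove(&slots[dst_slot], &slots[src_slot], nr_items * sizeof(int));
            /* slots is now { 10, 30, 40, 50, 60, 60 } */

            /* rewind, newest entry first:
             * 1) undo MOD_LOG_MOVE_KEYS by copying back from dst_slot */
            memmove(&slots[src_slot], &slots[dst_slot], nr_items * sizeof(int));
            /* 2) replay the REMOVE_WHILE_MOVING record to restore the old key */
            slots[saved_slot] = saved_val;

            printf("restored: %s\n",
                   memcmp(slots, expected, sizeof(slots)) == 0 ? "yes" : "no");
            return 0;
    }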
+static struct extent_buffer *
+tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+                   u64 time_seq)
+{
+       struct extent_buffer *eb_rewin;
+       struct tree_mod_elem *tm;
+
+       if (!time_seq)
+               return eb;
+
+       if (btrfs_header_level(eb) == 0)
+               return eb;
+
+       tm = tree_mod_log_search(fs_info, eb->start, time_seq);
+       if (!tm)
+               return eb;
+
+       if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+               BUG_ON(tm->slot != 0);
+               eb_rewin = alloc_dummy_extent_buffer(eb->start,
+                                               fs_info->tree_root->nodesize);
+               BUG_ON(!eb_rewin);
+               btrfs_set_header_bytenr(eb_rewin, eb->start);
+               btrfs_set_header_backref_rev(eb_rewin,
+                                            btrfs_header_backref_rev(eb));
+               btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
+               btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
+       } else {
+               eb_rewin = btrfs_clone_extent_buffer(eb);
+               BUG_ON(!eb_rewin);
+       }
+
+       extent_buffer_get(eb_rewin);
+       free_extent_buffer(eb);
+
+       __tree_mod_log_rewind(eb_rewin, time_seq, tm);
+
+       return eb_rewin;
+}
+
+/*
+ * get_old_root() rewinds the state of @root's root node to the given @time_seq
+ * value. If there are no changes, the current root->node is returned. If
+ * anything changed in between, there's a fresh buffer allocated on which the
+ * rewind operations are done. In any case, the returned buffer is read locked.
+ * Returns NULL on error (with no locks held).
+ */
+static inline struct extent_buffer *
+get_old_root(struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       struct extent_buffer *eb;
+       struct tree_mod_root *old_root = NULL;
+       u64 old_generation = 0;
+       u64 logical;
+
+       eb = btrfs_read_lock_root_node(root);
+       tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+       if (!tm)
+               return root->node;
+
+       if (tm->op == MOD_LOG_ROOT_REPLACE) {
+               old_root = &tm->old_root;
+               old_generation = tm->generation;
+               logical = old_root->logical;
+       } else {
+               logical = root->node->start;
+       }
+
+       tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+       if (old_root)
+               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
+       else
+               eb = btrfs_clone_extent_buffer(root->node);
+       btrfs_tree_read_unlock(root->node);
+       free_extent_buffer(root->node);
+       if (!eb)
+               return NULL;
+       btrfs_tree_read_lock(eb);
+       if (old_root) {
+               btrfs_set_header_bytenr(eb, eb->start);
+               btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+               btrfs_set_header_owner(eb, root->root_key.objectid);
+               btrfs_set_header_level(eb, old_root->level);
+               btrfs_set_header_generation(eb, old_generation);
+       }
+       if (tm)
+               __tree_mod_log_rewind(eb, time_seq, tm);
+       else
+               WARN_ON(btrfs_header_level(eb) != 0);
+       extent_buffer_get(eb);
+
+       return eb;
+}
+
 static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
@@ -739,7 +1424,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                                if (!cur)
                                        return -EIO;
                        } else if (!uptodate) {
-                               btrfs_read_buffer(cur, gen);
+                               err = btrfs_read_buffer(cur, gen);
+                               if (err) {
+                                       free_extent_buffer(cur);
+                                       return err;
+                               }
                        }
                }
                if (search_start == 0)
@@ -854,20 +1543,18 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                      int level, int *slot)
 {
-       if (level == 0) {
+       if (level == 0)
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
                                          sizeof(struct btrfs_item),
                                          key, btrfs_header_nritems(eb),
                                          slot);
-       } else {
+       else
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
                                          sizeof(struct btrfs_key_ptr),
                                          key, btrfs_header_nritems(eb),
                                          slot);
-       }
-       return -1;
 }
 
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
@@ -974,6 +1661,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
+               tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
                add_root_to_dirty_list(root);
@@ -987,7 +1675,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+               btrfs_free_tree_block(trans, root, mid, 0, 1);
                /* once for the root ptr */
                free_extent_buffer_stale(mid);
                return 0;
@@ -996,8 +1684,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
            BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
                return 0;
 
-       btrfs_header_nritems(mid);
-
        left = read_node_slot(root, parent, pslot - 1);
        if (left) {
                btrfs_tree_lock(left);
@@ -1027,7 +1713,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                wret = push_node_left(trans, root, left, mid, 1);
                if (wret < 0)
                        ret = wret;
-               btrfs_header_nritems(mid);
        }
 
        /*
@@ -1040,14 +1725,16 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                if (btrfs_header_nritems(right) == 0) {
                        clean_tree_block(trans, root, right);
                        btrfs_tree_unlock(right);
-                       del_ptr(trans, root, path, level + 1, pslot + 1);
+                       del_ptr(trans, root, path, level + 1, pslot + 1, 1);
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1, 0);
+                       btrfs_free_tree_block(trans, root, right, 0, 1);
                        free_extent_buffer_stale(right);
                        right = NULL;
                } else {
                        struct btrfs_disk_key right_key;
                        btrfs_node_key(right, &right_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &right_key, pslot + 1, 0);
                        btrfs_set_node_key(parent, &right_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);
                }
@@ -1082,15 +1769,17 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
        if (btrfs_header_nritems(mid) == 0) {
                clean_tree_block(trans, root, mid);
                btrfs_tree_unlock(mid);
-               del_ptr(trans, root, path, level + 1, pslot);
+               del_ptr(trans, root, path, level + 1, pslot, 1);
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+               btrfs_free_tree_block(trans, root, mid, 0, 1);
                free_extent_buffer_stale(mid);
                mid = NULL;
        } else {
                /* update the parent key to reflect our changes */
                struct btrfs_disk_key mid_key;
                btrfs_node_key(mid, &mid_key, 0);
+               tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
+                                         pslot, 0);
                btrfs_set_node_key(parent, &mid_key, pslot);
                btrfs_mark_buffer_dirty(parent);
        }
@@ -1188,6 +1877,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                        struct btrfs_disk_key disk_key;
                        orig_slot += left_nr;
                        btrfs_node_key(mid, &disk_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &disk_key, pslot, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot);
                        btrfs_mark_buffer_dirty(parent);
                        if (btrfs_header_nritems(left) > orig_slot) {
@@ -1239,6 +1930,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                        struct btrfs_disk_key disk_key;
 
                        btrfs_node_key(right, &disk_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &disk_key, pslot + 1, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);
 
@@ -1496,7 +2189,7 @@ static int
 read_block_for_search(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *p,
                       struct extent_buffer **eb_ret, int level, int slot,
-                      struct btrfs_key *key)
+                      struct btrfs_key *key, u64 time_seq)
 {
        u64 blocknr;
        u64 gen;
@@ -1850,7 +2543,7 @@ cow_done:
                        }
 
                        err = read_block_for_search(trans, root, p,
-                                                   &b, level, slot, key);
+                                                   &b, level, slot, key, 0);
                        if (err == -EAGAIN)
                                goto again;
                        if (err) {
@@ -1921,6 +2614,113 @@ done:
        return ret;
 }
 
+/*
+ * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
+ * current state of the tree together with the operations recorded in the tree
+ * modification log to search for the key in a previous version of this tree, as
+ * denoted by the time_seq parameter.
+ *
+ * Naturally, there is no support for insert, delete or cow operations.
+ *
+ * The resulting path and return value will be set up as if we called
+ * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
+ */
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+                         struct btrfs_path *p, u64 time_seq)
+{
+       struct extent_buffer *b;
+       int slot;
+       int ret;
+       int err;
+       int level;
+       int lowest_unlock = 1;
+       u8 lowest_level = 0;
+
+       lowest_level = p->lowest_level;
+       WARN_ON(p->nodes[0] != NULL);
+
+       if (p->search_commit_root) {
+               BUG_ON(time_seq);
+               return btrfs_search_slot(NULL, root, key, p, 0, 0);
+       }
+
+again:
+       b = get_old_root(root, time_seq);
+       level = btrfs_header_level(b);
+       p->locks[level] = BTRFS_READ_LOCK;
+
+       while (b) {
+               level = btrfs_header_level(b);
+               p->nodes[level] = b;
+               btrfs_clear_path_blocking(p, NULL, 0);
+
+               /*
+                * we have a lock on b and as long as we aren't changing
+                * the tree, there is no way for the items in b to change.
+                * It is safe to drop the lock on our parent before we
+                * go through the expensive btree search on b.
+                */
+               btrfs_unlock_up_safe(p, level + 1);
+
+               ret = bin_search(b, key, level, &slot);
+
+               if (level != 0) {
+                       int dec = 0;
+                       if (ret && slot > 0) {
+                               dec = 1;
+                               slot -= 1;
+                       }
+                       p->slots[level] = slot;
+                       unlock_up(p, level, lowest_unlock, 0, NULL);
+
+                       if (level == lowest_level) {
+                               if (dec)
+                                       p->slots[level]++;
+                               goto done;
+                       }
+
+                       err = read_block_for_search(NULL, root, p, &b, level,
+                                                   slot, key, time_seq);
+                       if (err == -EAGAIN)
+                               goto again;
+                       if (err) {
+                               ret = err;
+                               goto done;
+                       }
+
+                       level = btrfs_header_level(b);
+                       err = btrfs_try_tree_read_lock(b);
+                       if (!err) {
+                               btrfs_set_path_blocking(p);
+                               btrfs_tree_read_lock(b);
+                               btrfs_clear_path_blocking(p, b,
+                                                         BTRFS_READ_LOCK);
+                       }
+                       p->locks[level] = BTRFS_READ_LOCK;
+                       p->nodes[level] = b;
+                       b = tree_mod_log_rewind(root->fs_info, b, time_seq);
+                       if (b != p->nodes[level]) {
+                               btrfs_tree_unlock_rw(p->nodes[level],
+                                                    p->locks[level]);
+                               p->locks[level] = 0;
+                               p->nodes[level] = b;
+                       }
+               } else {
+                       p->slots[level] = slot;
+                       unlock_up(p, level, lowest_unlock, 0, NULL);
+                       goto done;
+               }
+       }
+       ret = 1;
+done:
+       if (!p->leave_spinning)
+               btrfs_set_path_blocking(p);
+       if (ret < 0)
+               btrfs_release_path(p);
+
+       return ret;
+}
+
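btrfs_search_old_slot is only meaningful while a tree mod sequence "blocker" is
held; otherwise the log entries it replays may already be gone. A rough sketch of
how a caller could combine it with the seq_list / btrfs_get_tree_mod_seq helpers
introduced elsewhere in this patch. walk_old_version and the BTRFS_EXTENT_DATA_KEY
item type are placeholders, error handling is trimmed, and this is not part of the
patch itself:

    /* sketch only, not part of this patch */
    static int walk_old_version(struct btrfs_root *root, u64 objectid)
    {
            struct btrfs_path *path;
            struct btrfs_key key;
            struct seq_list elem = {};
            int ret;

            path = btrfs_alloc_path();
            if (!path)
                    return -ENOMEM;

            /* pin a sequence number; log entries at or after elem.seq are kept */
            btrfs_get_tree_mod_seq(root->fs_info, &elem);

            key.objectid = objectid;
            key.type = BTRFS_EXTENT_DATA_KEY;       /* placeholder item type */
            key.offset = 0;

            ret = btrfs_search_old_slot(root, &key, path, elem.seq);
            if (ret < 0)
                    goto out;

            while (1) {
                    struct extent_buffer *leaf = path->nodes[0];

                    if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                            ret = btrfs_next_old_leaf(root, path, elem.seq);
                            if (ret)
                                    break;  /* < 0 error, > 0 end of tree */
                            continue;
                    }
                    /* ... inspect the item at path->slots[0] here ... */
                    path->slots[0]++;
            }
    out:
            btrfs_put_tree_mod_seq(root->fs_info, &elem);
            btrfs_free_path(path);
            return ret < 0 ? ret : 0;
    }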
 /*
  * adjust the pointers going up the tree, starting at level
  * making sure the right key of each node points to 'key'.
@@ -1941,6 +2741,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans,
                if (!path->nodes[i])
                        break;
                t = path->nodes[i];
+               tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
                btrfs_set_node_key(t, key, tslot);
                btrfs_mark_buffer_dirty(path->nodes[i]);
                if (tslot != 0)
@@ -2023,12 +2824,16 @@ static int push_node_left(struct btrfs_trans_handle *trans,
        } else
                push_items = min(src_nritems - 8, push_items);
 
+       tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+                            push_items);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(dst_nritems),
                           btrfs_node_key_ptr_offset(0),
                           push_items * sizeof(struct btrfs_key_ptr));
 
        if (push_items < src_nritems) {
+               tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
+                                    src_nritems - push_items);
                memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
                                      btrfs_node_key_ptr_offset(push_items),
                                      (src_nritems - push_items) *
@@ -2082,11 +2887,14 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
        if (max_push < push_items)
                push_items = max_push;
 
+       tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
        memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
                                      btrfs_node_key_ptr_offset(0),
                                      (dst_nritems) *
                                      sizeof(struct btrfs_key_ptr));
 
+       tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+                            src_nritems - push_items, push_items);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -2129,7 +2937,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 
        c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                   root->root_key.objectid, &lower_key,
-                                  level, root->node->start, 0, 0);
+                                  level, root->node->start, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -2161,6 +2969,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(c);
 
        old = root->node;
+       tree_mod_log_set_root_pointer(root, c);
        rcu_assign_pointer(root->node, c);
 
        /* the super has an extra ref to root->node */
@@ -2188,6 +2997,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
 {
        struct extent_buffer *lower;
        int nritems;
+       int ret;
 
        BUG_ON(!path->nodes[level]);
        btrfs_assert_tree_locked(path->nodes[level]);
@@ -2196,11 +3006,19 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
+               if (level)
+                       tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+                                            slot, nritems - slot);
                memmove_extent_buffer(lower,
                              btrfs_node_key_ptr_offset(slot + 1),
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
+       if (level) {
+               ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+                                             MOD_LOG_KEY_ADD);
+               BUG_ON(ret < 0);
+       }
        btrfs_set_node_key(lower, key, slot);
        btrfs_set_node_blockptr(lower, slot, bytenr);
        WARN_ON(trans->transid == 0);
@@ -2252,7 +3070,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 
        split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, level, c->start, 0, 0);
+                                       &disk_key, level, c->start, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -2271,7 +3089,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
                            (unsigned long)btrfs_header_chunk_tree_uuid(split),
                            BTRFS_UUID_SIZE);
 
-
+       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
        copy_extent_buffer(split, c,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(mid),
@@ -3004,7 +3822,7 @@ again:
 
        right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, 0, l->start, 0, 0);
+                                       &disk_key, 0, l->start, 0);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
@@ -3749,19 +4567,29 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
  * empty a node.
  */
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                   struct btrfs_path *path, int level, int slot)
+                   struct btrfs_path *path, int level, int slot,
+                   int tree_mod_log)
 {
        struct extent_buffer *parent = path->nodes[level];
        u32 nritems;
+       int ret;
 
        nritems = btrfs_header_nritems(parent);
        if (slot != nritems - 1) {
+               if (tree_mod_log && level)
+                       tree_mod_log_eb_move(root->fs_info, parent, slot,
+                                            slot + 1, nritems - slot - 1);
                memmove_extent_buffer(parent,
                              btrfs_node_key_ptr_offset(slot),
                              btrfs_node_key_ptr_offset(slot + 1),
                              sizeof(struct btrfs_key_ptr) *
                              (nritems - slot - 1));
+       } else if (tree_mod_log && level) {
+               ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
+                                             MOD_LOG_KEY_REMOVE);
+               BUG_ON(ret < 0);
        }
+
        nritems--;
        btrfs_set_header_nritems(parent, nritems);
        if (nritems == 0 && parent == root->node) {
@@ -3793,7 +4621,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
                                    struct extent_buffer *leaf)
 {
        WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-       del_ptr(trans, root, path, 1, path->slots[1]);
+       del_ptr(trans, root, path, 1, path->slots[1], 1);
 
        /*
         * btrfs_free_extent is expensive, we want to make sure we
@@ -3804,7 +4632,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
        root_sub_used(root, leaf->len);
 
        extent_buffer_get(leaf);
-       btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
+       btrfs_free_tree_block(trans, root, leaf, 0, 1);
        free_extent_buffer_stale(leaf);
 }
 /*
@@ -4201,6 +5029,12 @@ next:
  * returns < 0 on io errors.
  */
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+       return btrfs_next_old_leaf(root, path, 0);
+}
+
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+                       u64 time_seq)
 {
        int slot;
        int level;
@@ -4226,7 +5060,10 @@ again:
        path->keep_locks = 1;
        path->leave_spinning = 1;
 
-       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (time_seq)
+               ret = btrfs_search_old_slot(root, &key, path, time_seq);
+       else
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        path->keep_locks = 0;
 
        if (ret < 0)
@@ -4271,7 +5108,7 @@ again:
                next = c;
                next_rw_lock = path->locks[level];
                ret = read_block_for_search(NULL, root, path, &next, level,
-                                           slot, &key);
+                                           slot, &key, 0);
                if (ret == -EAGAIN)
                        goto again;
 
@@ -4282,6 +5119,18 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && time_seq) {
+                               /*
+                                * If we don't get the lock, we may be racing
+                                * with push_leaf_left, which holds that lock
+                                * while itself waiting for the leaf we've
+                                * currently locked. To solve this, we give up
+                                * our lock, reschedule and retry from the top.
+                                */
+                               btrfs_release_path(path);
+                               cond_resched();
+                               goto again;
+                       }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
@@ -4308,7 +5157,7 @@ again:
                        break;
 
                ret = read_block_for_search(NULL, root, path, &next, level,
-                                           0, &key);
+                                           0, &key, 0);
                if (ret == -EAGAIN)
                        goto again;
 
index 8fd72331d6008c100e48db1c808566eb382187b2..fa5c45b39075d858e9803c6cb1044338c7ee5d35 100644 (file)
@@ -173,6 +173,9 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 #define BTRFS_FT_XATTR         8
 #define BTRFS_FT_MAX           9
 
+/* ioprio of readahead is set to idle */
+#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -823,6 +826,14 @@ struct btrfs_csum_item {
        u8 csum;
 } __attribute__ ((__packed__));
 
+struct btrfs_dev_stats_item {
+       /*
+        * grow this item struct at the end for future enhancements and keep
+        * the existing values unchanged
+        */
+       __le64 values[BTRFS_DEV_STAT_VALUES_MAX];
+} __attribute__ ((__packed__));
+
 /* different types of block groups (and chunks) */
 #define BTRFS_BLOCK_GROUP_DATA         (1ULL << 0)
 #define BTRFS_BLOCK_GROUP_SYSTEM       (1ULL << 1)
@@ -1129,6 +1140,15 @@ struct btrfs_fs_info {
        spinlock_t delayed_iput_lock;
        struct list_head delayed_iputs;
 
+       /* this protects tree_mod_seq_list */
+       spinlock_t tree_mod_seq_lock;
+       atomic_t tree_mod_seq;
+       struct list_head tree_mod_seq_list;
+
+       /* this protects tree_mod_log */
+       rwlock_t tree_mod_log_lock;
+       struct rb_root tree_mod_log;
+
        atomic_t nr_async_submits;
        atomic_t async_submit_draining;
        atomic_t nr_async_bios;
@@ -1375,7 +1395,7 @@ struct btrfs_root {
        struct list_head root_list;
 
        spinlock_t orphan_lock;
-       struct list_head orphan_list;
+       atomic_t orphan_inodes;
        struct btrfs_block_rsv *orphan_block_rsv;
        int orphan_item_inserted;
        int orphan_cleanup_state;
@@ -1507,6 +1527,12 @@ struct btrfs_ioctl_defrag_range_args {
 
 #define BTRFS_BALANCE_ITEM_KEY 248
 
+/*
+ * Persistently stores the io stats in the device tree.
+ * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid).
+ */
+#define BTRFS_DEV_STATS_KEY    249
+
 /*
  * string items are for debugging.  They just store a short string of
  * data in the FS
@@ -2415,6 +2441,30 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
        return btrfs_item_size(eb, e) - offset;
 }
 
+/* btrfs_dev_stats_item */
+static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
+                                       struct btrfs_dev_stats_item *ptr,
+                                       int index)
+{
+       u64 val;
+
+       read_extent_buffer(eb, &val,
+                          offsetof(struct btrfs_dev_stats_item, values) +
+                           ((unsigned long)ptr) + (index * sizeof(u64)),
+                          sizeof(val));
+       return val;
+}
+
+static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
+                                            struct btrfs_dev_stats_item *ptr,
+                                            int index, u64 val)
+{
+       write_extent_buffer(eb, &val,
+                           offsetof(struct btrfs_dev_stats_item, values) +
+                            ((unsigned long)ptr) + (index * sizeof(u64)),
+                           sizeof(val));
+}
+
 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 {
        return sb->s_fs_info;
@@ -2496,11 +2546,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size, int for_cow);
+                                       u64 hint, u64 empty_size);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref, int for_cow);
+                          u64 parent, int last_ref);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize,
@@ -2659,6 +2709,8 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, struct btrfs_path *p, int
                      ins_len, int cow);
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+                         struct btrfs_path *p, u64 time_seq);
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
                       int start_slot, int cache_only, u64 *last_ret,
@@ -2701,13 +2753,20 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
-static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+                       u64 time_seq);
+static inline int btrfs_next_old_item(struct btrfs_root *root,
+                                     struct btrfs_path *p, u64 time_seq)
 {
        ++p->slots[0];
        if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
-               return btrfs_next_leaf(root, p);
+               return btrfs_next_old_leaf(root, p, time_seq);
        return 0;
 }
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+       return btrfs_next_old_item(root, p, 0);
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
@@ -2922,7 +2981,6 @@ int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 int btrfs_dirty_inode(struct inode *inode);
-int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
@@ -3098,4 +3156,23 @@ void btrfs_reada_detach(void *handle);
 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
                         u64 start, int err);
 
+/* delayed seq elem */
+struct seq_list {
+       struct list_head list;
+       u64 seq;
+       u32 flags;
+};
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem);
+
+static inline int is_fstree(u64 rootid)
+{
+       if (rootid == BTRFS_FS_TREE_OBJECTID ||
+           (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+               return 1;
+       return 0;
+}
 #endif
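is_fstree() is what replaces the removed need_ref_seq() check later in this patch:
only references belonging to a file system tree (the default fs tree or any
subvolume/snapshot tree) need a delayed-ref sequence number. Assuming the usual
objectid values from the btrfs headers (5 for BTRFS_FS_TREE_OBJECTID and 256 for
BTRFS_FIRST_FREE_OBJECTID; they are not shown in this hunk), it behaves like this:

    is_fstree(5);    /* default fs tree        -> 1 */
    is_fstree(256);  /* first subvolume tree   -> 1 */
    is_fstree(2);    /* extent tree (internal) -> 0 */
    is_fstree(7);    /* csum tree (internal)   -> 0 */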
index 03e3748d84d02407c19c6d46648667a56f13ba3e..2399f4086915acdc724f4def2fe308cf24d04c6f 100644 (file)
@@ -669,8 +669,8 @@ static int btrfs_delayed_inode_reserve_metadata(
                return ret;
        } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
                spin_lock(&BTRFS_I(inode)->lock);
-               if (BTRFS_I(inode)->delalloc_meta_reserved) {
-                       BTRFS_I(inode)->delalloc_meta_reserved = 0;
+               if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                                      &BTRFS_I(inode)->runtime_flags)) {
                        spin_unlock(&BTRFS_I(inode)->lock);
                        release = true;
                        goto migrate;
@@ -1706,7 +1706,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
-       btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+       btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1754,7 +1754,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
-       BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+       inode->i_version = btrfs_stack_inode_sequence(inode_item);
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
                }
        }
 }
+
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct btrfs_delayed_node *curr_node, *prev_node;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       curr_node = btrfs_first_delayed_node(delayed_root);
+       while (curr_node) {
+               __btrfs_kill_delayed_node(curr_node);
+
+               prev_node = curr_node;
+               curr_node = btrfs_next_delayed_node(curr_node);
+               btrfs_release_delayed_node(prev_node);
+       }
+}
+
index 7083d08b2a212bda501d7d3f7e08fe9c18bfe3a0..f5aa4023d3e18fe65488b91283b41b968da4e50d 100644 (file)
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 
+/* Used for clean the transaction */
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
+
 /* Used for readdir() */
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
                             struct list_head *del_list);
index 69f22e3ab3bc307974b5cae14f99310a498b54cf..13ae7b04790eaff72e8c23fb145fca8bfae88175 100644 (file)
@@ -525,7 +525,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        ref->is_head = 0;
        ref->in_tree = 1;
 
-       if (need_ref_seq(for_cow, ref_root))
+       if (is_fstree(ref_root))
                seq = inc_delayed_seq(delayed_refs);
        ref->seq = seq;
 
@@ -584,7 +584,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        ref->is_head = 0;
        ref->in_tree = 1;
 
-       if (need_ref_seq(for_cow, ref_root))
+       if (is_fstree(ref_root))
                seq = inc_delayed_seq(delayed_refs);
        ref->seq = seq;
 
@@ -658,10 +658,11 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, level, action,
                                   for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
+       if (!is_fstree(ref_root) &&
            waitqueue_active(&delayed_refs->seq_wait))
                wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
+
        return 0;
 }
 
@@ -706,10 +707,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, owner, offset,
                                   action, for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
+       if (!is_fstree(ref_root) &&
            waitqueue_active(&delayed_refs->seq_wait))
                wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
+
        return 0;
 }
 
index d8f244d9492511e3b108b26bcf4da1bc9fbf6826..413927fb9957e41fdcfb82511e63d416b8a36c76 100644 (file)
@@ -195,11 +195,6 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
 
-struct seq_list {
-       struct list_head list;
-       u64 seq;
-};
-
 static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
 {
        assert_spin_locked(&delayed_refs->lock);
@@ -229,25 +224,6 @@ btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
 int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq);
 
-/*
- * delayed refs with a ref_seq > 0 must be held back during backref walking.
- * this only applies to items in one of the fs-trees. for_cow items never need
- * to be held back, so they won't get a ref_seq number.
- */
-static inline int need_ref_seq(int for_cow, u64 rootid)
-{
-       if (for_cow)
-               return 0;
-
-       if (rootid == BTRFS_FS_TREE_OBJECTID)
-               return 1;
-
-       if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
-               return 1;
-
-       return 0;
-}
-
 /*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
index e1fe74a2ce16e6a4e0b38129160f484e642c42fa..2936ca49b3b4af3a799f7585b74038d289ab8278 100644 (file)
@@ -44,6 +44,7 @@
 #include "free-space-cache.h"
 #include "inode-map.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -1153,7 +1154,6 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->orphan_block_rsv = NULL;
 
        INIT_LIST_HEAD(&root->dirty_list);
-       INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
@@ -1166,6 +1166,7 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
+       atomic_set(&root->orphan_inodes, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
@@ -1252,7 +1253,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
-                                     0, 0, 0, 0);
+                                     0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
@@ -1914,11 +1915,14 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
        spin_lock_init(&fs_info->free_chunk_lock);
+       spin_lock_init(&fs_info->tree_mod_seq_lock);
+       rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);
 
        init_completion(&fs_info->kobj_unregister);
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
+       INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
        btrfs_mapping_init(&fs_info->mapping_tree);
        btrfs_init_block_rsv(&fs_info->global_block_rsv);
        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
@@ -1931,12 +1935,14 @@ int open_ctree(struct super_block *sb,
        atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->defrag_running, 0);
+       atomic_set(&fs_info->tree_mod_seq, 0);
        fs_info->sb = sb;
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
        fs_info->trans_no_join = 0;
        fs_info->free_chunk_space = 0;
+       fs_info->tree_mod_log = RB_ROOT;
 
        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
@@ -2001,7 +2007,8 @@ int open_ctree(struct super_block *sb,
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
-       BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
+       set_bit(BTRFS_INODE_DUMMY,
+               &BTRFS_I(fs_info->btree_inode)->runtime_flags);
        insert_inode_hash(fs_info->btree_inode);
 
        spin_lock_init(&fs_info->block_group_cache_lock);
@@ -2112,7 +2119,7 @@ int open_ctree(struct super_block *sb,
 
        features = btrfs_super_incompat_flags(disk_super);
        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-       if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+       if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
        /*
@@ -2347,12 +2354,24 @@ retry_root_backup:
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
-
        csum_root->track_dirty = 1;
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_recover_balance(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to recover balance\n");
+               goto fail_block_groups;
+       }
+
+       ret = btrfs_init_dev_stats(fs_info);
+       if (ret) {
+               printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
+                      ret);
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_space_info(fs_info);
        if (ret) {
                printk(KERN_ERR "Failed to initial space info: %d\n", ret);
@@ -2471,20 +2490,23 @@ retry_root_backup:
                goto fail_trans_kthread;
        }
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               err = btrfs_orphan_cleanup(fs_info->fs_root);
-               if (!err)
-                       err = btrfs_orphan_cleanup(fs_info->tree_root);
-               up_read(&fs_info->cleanup_work_sem);
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
 
-               if (!err)
-                       err = btrfs_recover_balance(fs_info->tree_root);
+       down_read(&fs_info->cleanup_work_sem);
+       if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+           (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+               up_read(&fs_info->cleanup_work_sem);
+               close_ctree(tree_root);
+               return ret;
+       }
+       up_read(&fs_info->cleanup_work_sem);
 
-               if (err) {
-                       close_ctree(tree_root);
-                       return err;
-               }
+       ret = btrfs_resume_balance_async(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to resume balance\n");
+               close_ctree(tree_root);
+               return ret;
        }
 
        return 0;
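
The reworked tail of open_ctree() above returns early for read-only mounts and otherwise runs orphan cleanup followed by the asynchronous balance resume, tearing everything down with close_ctree() on the first failure. A minimal standalone sketch of that shape in plain C, using invented step names rather than the real btrfs helpers:

/*
 * Sketch only: run the read-write recovery steps in order and funnel
 * every failure through a single teardown path.  orphan_cleanup(),
 * resume_balance_async() and close_tree() are stand-ins, not btrfs code.
 */
#include <stdio.h>

static int orphan_cleanup(void)       { return 0; }
static int resume_balance_async(void) { return 0; }
static void close_tree(void)          { puts("teardown"); }

static int finish_mount(int read_only)
{
        int ret;

        if (read_only)                  /* nothing to replay read-only */
                return 0;

        ret = orphan_cleanup();
        if (!ret)
                ret = resume_balance_async();
        if (ret) {
                close_tree();           /* one teardown path for all failures */
                return ret;
        }
        return 0;
}

int main(void)
{
        return finish_mount(0);
}
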
@@ -2556,18 +2578,20 @@ recovery_tree_root:
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-       char b[BDEVNAME_SIZE];
-
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               printk_ratelimited(KERN_WARNING "lost page write due to "
-                                       "I/O error on %s\n",
-                                      bdevname(bh->b_bdev, b));
+               struct btrfs_device *device = (struct btrfs_device *)
+                       bh->b_private;
+
+               printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+                                         "I/O error on %s\n",
+                                         rcu_str_deref(device->name));
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
+               btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
        }
        unlock_buffer(bh);
        put_bh(bh);
@@ -2682,6 +2706,7 @@ static int write_dev_supers(struct btrfs_device *device,
                        set_buffer_uptodate(bh);
                        lock_buffer(bh);
                        bh->b_end_io = btrfs_end_buffer_write_sync;
+                       bh->b_private = device;
                }
 
                /*
@@ -2734,12 +2759,15 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
                wait_for_completion(&device->flush_wait);
 
                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
-                       printk("btrfs: disabling barriers on dev %s\n",
-                              device->name);
+                       printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+                                     rcu_str_deref(device->name));
                        device->nobarriers = 1;
                }
                if (!bio_flagged(bio, BIO_UPTODATE)) {
                        ret = -EIO;
+                       if (!bio_flagged(bio, BIO_EOPNOTSUPP))
+                               btrfs_dev_stat_inc_and_print(device,
+                                       BTRFS_DEV_STAT_FLUSH_ERRS);
                }
 
                /* drop the reference from the wait == 0 run */
@@ -2902,19 +2930,6 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-/* Kill all outstanding I/O */
-void btrfs_abort_devices(struct btrfs_root *root)
-{
-       struct list_head *head;
-       struct btrfs_device *dev;
-       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-       head = &root->fs_info->fs_devices->devices;
-       list_for_each_entry_rcu(dev, head, dev_list) {
-               blk_abort_queue(dev->bdev->bd_disk->queue);
-       }
-       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
-}
-
 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 {
        spin_lock(&fs_info->fs_roots_radix_lock);
@@ -3395,7 +3410,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
        delayed_refs = &trans->delayed_refs;
 
-again:
        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
@@ -3403,31 +3417,37 @@ again:
                return ret;
        }
 
-       node = rb_first(&delayed_refs->root);
-       while (node) {
+       while ((node = rb_first(&delayed_refs->root)) != NULL) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-               node = rb_next(node);
-
-               ref->in_tree = 0;
-               rb_erase(&ref->rb_node, &delayed_refs->root);
-               delayed_refs->num_entries--;
 
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;
 
                        head = btrfs_delayed_node_to_head(ref);
-                       spin_unlock(&delayed_refs->lock);
-                       mutex_lock(&head->mutex);
+                       if (!mutex_trylock(&head->mutex)) {
+                               atomic_inc(&ref->refs);
+                               spin_unlock(&delayed_refs->lock);
+
+                               /* Need to wait for the delayed ref to run */
+                               mutex_lock(&head->mutex);
+                               mutex_unlock(&head->mutex);
+                               btrfs_put_delayed_ref(ref);
+
+                               spin_lock(&delayed_refs->lock);
+                               continue;
+                       }
+
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
-                       mutex_unlock(&head->mutex);
-                       btrfs_put_delayed_ref(ref);
-                       goto again;
                }
+               ref->in_tree = 0;
+               rb_erase(&ref->rb_node, &delayed_refs->root);
+               delayed_refs->num_entries--;
+
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);
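
The rewritten loop above must not sleep on head->mutex while delayed_refs->lock is held, so on a failed trylock it pins the node with an extra reference, drops the spinlock, waits for the mutex, and then restarts from rb_first(). A rough userspace illustration of that trylock-pin-retry pattern, using pthreads and invented types rather than the kernel primitives:

/*
 * Sketch only: a list protected by list_lock whose entries each carry a
 * mutex a worker may hold.  We never sleep on an entry mutex while
 * holding list_lock; on contention we pin the entry, drop the list
 * lock, wait, and start over.  refs is protected by list_lock here.
 */
#include <pthread.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        pthread_mutex_t mutex;          /* held while a worker processes it */
        int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static void put_entry(struct entry *e) /* caller holds list_lock */
{
        if (--e->refs == 0)
                free(e);
}

static void drain_entries(void)
{
        pthread_mutex_lock(&list_lock);
        while (head) {
                struct entry *e = head;

                if (pthread_mutex_trylock(&e->mutex)) {
                        e->refs++;                      /* pin before unlocking */
                        pthread_mutex_unlock(&list_lock);

                        pthread_mutex_lock(&e->mutex);  /* wait for the worker */
                        pthread_mutex_unlock(&e->mutex);

                        pthread_mutex_lock(&list_lock);
                        put_entry(e);
                        continue;                       /* list may have changed */
                }

                head = e->next;                         /* unlink, both locks held */
                pthread_mutex_unlock(&e->mutex);
                put_entry(e);
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        drain_entries();
        return 0;
}
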
 
@@ -3515,11 +3535,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
                                               offset >> PAGE_CACHE_SHIFT);
                        spin_unlock(&dirty_pages->buffer_lock);
-                       if (eb) {
+                       if (eb)
                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                                         &eb->bflags);
-                               atomic_set(&eb->refs, 1);
-                       }
                        if (PageWriteback(page))
                                end_page_writeback(page);
 
@@ -3533,8 +3551,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                spin_unlock_irq(&page->mapping->tree_lock);
                        }
 
-                       page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
+                       page_cache_release(page);
                }
        }
 
@@ -3548,8 +3566,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
        u64 start;
        u64 end;
        int ret;
+       bool loop = true;
 
        unpin = pinned_extents;
+again:
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
@@ -3567,6 +3587,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                cond_resched();
        }
 
+       if (loop) {
+               if (unpin == &root->fs_info->freed_extents[0])
+                       unpin = &root->fs_info->freed_extents[1];
+               else
+                       unpin = &root->fs_info->freed_extents[0];
+               loop = false;
+               goto again;
+       }
+
        return 0;
 }
 
@@ -3580,21 +3609,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
        /* FIXME: cleanup wait for commit */
        cur_trans->in_commit = 1;
        cur_trans->blocked = 1;
-       if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
-               wake_up(&root->fs_info->transaction_blocked_wait);
+       wake_up(&root->fs_info->transaction_blocked_wait);
 
        cur_trans->blocked = 0;
-       if (waitqueue_active(&root->fs_info->transaction_wait))
-               wake_up(&root->fs_info->transaction_wait);
+       wake_up(&root->fs_info->transaction_wait);
 
        cur_trans->commit_done = 1;
-       if (waitqueue_active(&cur_trans->commit_wait))
-               wake_up(&cur_trans->commit_wait);
+       wake_up(&cur_trans->commit_wait);
+
+       btrfs_destroy_delayed_inodes(root);
+       btrfs_assert_delayed_root_empty(root);
 
        btrfs_destroy_pending_snapshots(cur_trans);
 
        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
                                     EXTENT_DIRTY);
+       btrfs_destroy_pinned_extent(root,
+                                   root->fs_info->pinned_extents);
 
        /*
        memset(cur_trans, 0, sizeof(*cur_trans));
@@ -3643,6 +3674,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
 
+               btrfs_destroy_delayed_inodes(root);
+               btrfs_assert_delayed_root_empty(root);
+
                btrfs_destroy_pending_snapshots(t);
 
                btrfs_destroy_delalloc_inodes(root);
@@ -3671,17 +3705,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
        return 0;
 }
 
-static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
-                                         u64 start, u64 end,
-                                         struct extent_state *state)
-{
-       struct super_block *sb = page->mapping->host->i_sb;
-       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
-       btrfs_error(fs_info, -EIO,
-                   "Error occured while writing out btree at %llu", start);
-       return -EIO;
-}
-
 static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3689,5 +3712,4 @@ static struct extent_io_ops btree_extent_io_ops = {
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
-       .writepage_io_failed_hook = btree_writepage_io_failed_hook,
 };
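
Several hunks above stash the owning btrfs_device in bh->b_private so the write-sync completion can print the device name and bump that device's error counter. A small standalone sketch of the callback-plus-context idea, with invented struct and function names:

/*
 * Sketch only: attach a per-device pointer to an asynchronous write so
 * the completion handler can attribute a failure to the right device.
 */
#include <stdio.h>

struct device {
        const char *name;
        unsigned long write_errs;
};

struct write_req {
        void (*end_io)(struct write_req *req, int uptodate);
        void *private;                  /* -> struct device, set at submit time */
};

static void end_write_sync(struct write_req *req, int uptodate)
{
        if (!uptodate) {
                struct device *dev = req->private;

                dev->write_errs++;
                fprintf(stderr, "lost page write due to I/O error on %s\n",
                        dev->name);
        }
}

int main(void)
{
        struct device dev = { .name = "sdb", .write_errs = 0 };
        struct write_req req = { .end_io = end_write_sync, .private = &dev };

        req.end_io(&req, 0);            /* simulate a failed completion */
        printf("%s write errors: %lu\n", dev.name, dev.write_errs);
        return 0;
}
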
index ab1830aaf0edbffba6a0cef86d13e9b3f2742cda..05b3fab39f7e814fc8c958e125f5a14c7e39d7f9 100644 (file)
@@ -89,7 +89,6 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 int btrfs_cleanup_transaction(struct btrfs_root *root);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
                                  struct btrfs_root *root);
-void btrfs_abort_devices(struct btrfs_root *root);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
index e887ee62b6d4ba0a98f7e2437323eecfca88bf23..614f34a899c2db468792f1ef8406c5a366739258 100644 (file)
                                             parent_root_objectid) / 4)
 #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
 
-static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-                          int connectable)
+static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+                          struct inode *parent)
 {
        struct btrfs_fid *fid = (struct btrfs_fid *)fh;
-       struct inode *inode = dentry->d_inode;
        int len = *max_len;
        int type;
 
-       if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+       if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
                *max_len = BTRFS_FID_SIZE_CONNECTABLE;
                return 255;
        } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
@@ -36,19 +35,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        fid->root_objectid = BTRFS_I(inode)->root->objectid;
        fid->gen = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
+       if (parent) {
                u64 parent_root_id;
 
-               spin_lock(&dentry->d_lock);
-
-               parent = dentry->d_parent->d_inode;
                fid->parent_objectid = BTRFS_I(parent)->location.objectid;
                fid->parent_gen = parent->i_generation;
                parent_root_id = BTRFS_I(parent)->root->objectid;
 
-               spin_unlock(&dentry->d_lock);
-
                if (parent_root_id != fid->root_objectid) {
                        fid->parent_root_objectid = parent_root_id;
                        len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
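
btrfs_encode_fh() above now takes the inode and an optional parent inode instead of a dentry plus a connectable flag. The standalone sketch below mimics that calling shape -- check the caller's buffer, pack the ids, report the required length on failure -- using invented types and handle-type codes, not the real VFS definitions:

/*
 * Sketch only: encode an object (and optionally its parent) into a
 * 32-bit-word file handle, failing with 255 when the buffer is small.
 */
#include <stdint.h>
#include <stdio.h>

struct obj { uint64_t id; uint32_t gen; };

static int encode_fh(const struct obj *obj, uint32_t *fh, int *max_len,
                     const struct obj *parent)
{
        int len = parent ? 6 : 3;       /* handle length in 32-bit words */

        if (*max_len < len) {
                *max_len = len;         /* tell the caller how much it needs */
                return 255;
        }

        fh[0] = (uint32_t)(obj->id >> 32);
        fh[1] = (uint32_t)obj->id;
        fh[2] = obj->gen;
        if (parent) {
                fh[3] = (uint32_t)(parent->id >> 32);
                fh[4] = (uint32_t)parent->id;
                fh[5] = parent->gen;
        }
        *max_len = len;
        return parent ? 2 : 1;          /* invented "connectable"/"plain" codes */
}

int main(void)
{
        struct obj file = { .id = 257, .gen = 7 };
        struct obj dir  = { .id = 256, .gen = 3 };
        uint32_t fh[6];
        int max_len = 6;
        int type = encode_fh(&file, fh, &max_len, &dir);

        printf("type %d, %d words\n", type, max_len);
        return 0;
}
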
index 49fd7b66d57b272c7aeaea7db4b1bbd0985f8aa2..6e1d36702ff71c4fc5bde2733cbf166376b726d3 100644 (file)
@@ -2347,12 +2347,10 @@ next:
        return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-                       unsigned long num_refs)
+                              unsigned long num_refs,
+                              struct list_head *first_seq)
 {
-       struct list_head *first_seq = delayed_refs->seq_head.next;
-
        spin_unlock(&delayed_refs->lock);
        pr_debug("waiting for more refs (num %ld, first %p)\n",
                 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
+       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
                                 */
                                consider_waiting = 1;
                                num_refs = delayed_refs->num_entries;
+                               first_seq = root->fs_info->tree_mod_seq_list.next;
                        } else {
-                               wait_for_more_refs(delayed_refs, num_refs);
+                               wait_for_more_refs(delayed_refs,
+                                                  num_refs, first_seq);
                                /*
                                 * after waiting, things have changed. we
                                 * dropped the lock and someone else might have
@@ -3578,7 +3579,7 @@ again:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
 out:
-       mutex_unlock(&extent_root->fs_info->chunk_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
        return ret;
 }
 
@@ -4355,10 +4356,9 @@ static unsigned drop_outstanding_extent(struct inode *inode)
        BTRFS_I(inode)->outstanding_extents--;
 
        if (BTRFS_I(inode)->outstanding_extents == 0 &&
-           BTRFS_I(inode)->delalloc_meta_reserved) {
+           test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                              &BTRFS_I(inode)->runtime_flags))
                drop_inode_space = 1;
-               BTRFS_I(inode)->delalloc_meta_reserved = 0;
-       }
 
        /*
         * If we have more or the same amount of outstanding extents than we have
@@ -4465,7 +4465,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         * Add an item to reserve for updating the inode when we complete the
         * delalloc io.
         */
-       if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+       if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                     &BTRFS_I(inode)->runtime_flags)) {
                nr_extents++;
                extra_reserve = 1;
        }
@@ -4511,7 +4512,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        spin_lock(&BTRFS_I(inode)->lock);
        if (extra_reserve) {
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                       &BTRFS_I(inode)->runtime_flags);
                nr_extents--;
        }
        BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -5217,7 +5219,7 @@ out:
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref, int for_cow)
+                          u64 parent, int last_ref)
 {
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
@@ -5227,7 +5229,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                                        buf->start, buf->len,
                                        parent, root->root_key.objectid,
                                        btrfs_header_level(buf),
-                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
+                                       BTRFS_DROP_DELAYED_REF, NULL, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
 
@@ -6249,7 +6251,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size, int for_cow)
+                                       u64 hint, u64 empty_size)
 {
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
@@ -6297,7 +6299,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op, for_cow);
+                                       extent_op, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        return buf;
@@ -6715,7 +6717,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                               btrfs_header_owner(path->nodes[level + 1]));
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
+       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
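
The delalloc_meta_reserved conversions above replace a plain struct field with a bit in runtime_flags so that "test and clear" becomes a single atomic step. A rough userspace equivalent using C11 atomics instead of the kernel bitops, with invented flag and struct names:

/*
 * Sketch only: several per-inode booleans collapsed into one atomic
 * flags word, with race-free test-and-set / test-and-clear helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
        INODE_IN_DEFRAG,
        INODE_DELALLOC_META_RESERVED,
        INODE_ORDERED_DATA_CLOSE,
};

struct my_inode {
        atomic_ulong runtime_flags;
};

static bool test_and_set_flag(struct my_inode *inode, int bit)
{
        unsigned long mask = 1UL << bit;
        return atomic_fetch_or(&inode->runtime_flags, mask) & mask;
}

static bool test_and_clear_flag(struct my_inode *inode, int bit)
{
        unsigned long mask = 1UL << bit;
        return atomic_fetch_and(&inode->runtime_flags, ~mask) & mask;
}

static bool test_flag(struct my_inode *inode, int bit)
{
        return atomic_load(&inode->runtime_flags) & (1UL << bit);
}

int main(void)
{
        struct my_inode inode = { .runtime_flags = 0 };

        test_and_set_flag(&inode, INODE_DELALLOC_META_RESERVED);
        printf("reserved: %d\n",
               test_flag(&inode, INODE_DELALLOC_META_RESERVED));
        printf("was set: %d\n",
               test_and_clear_flag(&inode, INODE_DELALLOC_META_RESERVED));
        printf("reserved now: %d\n",
               test_flag(&inode, INODE_DELALLOC_META_RESERVED));
        return 0;
}
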
index c9018a05036e943a52ad91d81019bb4b934b6b9a..01c21b6c6d43e44a28a4c862ca6532d6f8b02654 100644 (file)
@@ -20,6 +20,7 @@
 #include "volumes.h"
 #include "check-integrity.h"
 #include "locking.h"
+#include "rcu-string.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -186,7 +187,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                        return parent;
        }
 
-       entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
@@ -413,7 +413,7 @@ static struct extent_state *next_state(struct extent_state *state)
 
 /*
  * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1)
+ * it will optionally wake up any one waiting on this state (wake == 1).
  *
  * If no bits are set on the state struct after clearing things, the
  * struct is freed and removed from the tree
@@ -570,10 +570,8 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       clear_state_bit(tree, state, &bits, wake);
-                       if (last_end == (u64)-1)
-                               goto out;
-                       start = last_end + 1;
+                       state = clear_state_bit(tree, state, &bits, wake);
+                       goto next;
                }
                goto search_again;
        }
@@ -781,7 +779,6 @@ hit_next:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               struct rb_node *next_node;
                if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
@@ -789,20 +786,15 @@ hit_next:
                }
 
                set_state_bits(tree, state, &bits);
-
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;
-
                start = last_end + 1;
-               next_node = rb_next(&state->rb_node);
-               if (next_node && start < end && prealloc && !need_resched()) {
-                       state = rb_entry(next_node, struct extent_state,
-                                        rb_node);
-                       if (state->start == start)
-                               goto hit_next;
-               }
+               state = next_state(state);
+               if (start < end && state && state->start == start &&
+                   !need_resched())
+                       goto hit_next;
                goto search_again;
        }
 
@@ -845,6 +837,10 @@ hit_next:
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
+                       state = next_state(state);
+                       if (start < end && state && state->start == start &&
+                           !need_resched())
+                               goto hit_next;
                }
                goto search_again;
        }
@@ -994,21 +990,14 @@ hit_next:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               struct rb_node *next_node;
-
                set_state_bits(tree, state, &bits);
-               clear_state_bit(tree, state, &clear_bits, 0);
+               state = clear_state_bit(tree, state, &clear_bits, 0);
                if (last_end == (u64)-1)
                        goto out;
-
                start = last_end + 1;
-               next_node = rb_next(&state->rb_node);
-               if (next_node && start < end && prealloc && !need_resched()) {
-                       state = rb_entry(next_node, struct extent_state,
-                                        rb_node);
-                       if (state->start == start)
-                               goto hit_next;
-               }
+               if (start < end && state && state->start == start &&
+                   !need_resched())
+                       goto hit_next;
                goto search_again;
        }
 
@@ -1042,10 +1031,13 @@ hit_next:
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
-                       clear_state_bit(tree, state, &clear_bits, 0);
+                       state = clear_state_bit(tree, state, &clear_bits, 0);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
+                       if (start < end && state && state->start == start &&
+                           !need_resched())
+                               goto hit_next;
                }
                goto search_again;
        }
@@ -1173,9 +1165,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                              cached_state, mask);
 }
 
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-                                u64 end, struct extent_state **cached_state,
-                                gfp_t mask)
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+                         struct extent_state **cached_state, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                cached_state, mask);
@@ -1293,7 +1284,7 @@ out:
  * returned if we find something, and *start_ret and *end_ret are
  * set to reflect the state struct that was found.
  *
- * If nothing was found, 1 is returned, < 0 on error
+ * If nothing was found, 1 is returned; if something was found, 0 is returned.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
@@ -1923,12 +1914,13 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                /* try to remap that extent elsewhere? */
                bio_put(bio);
+               btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
        }
 
-       printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
-                       "sector %llu)\n", page->mapping->host->i_ino, start,
-                       dev->name, sector);
+       printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+                     "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+                     start, rcu_str_deref(dev->name), sector);
 
        bio_put(bio);
        return 0;
@@ -2222,17 +2214,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
                        uptodate = 0;
        }
 
-       if (!uptodate && tree->ops &&
-           tree->ops->writepage_io_failed_hook) {
-               ret = tree->ops->writepage_io_failed_hook(NULL, page,
-                                                start, end, NULL);
-               /* Writeback already completed */
-               if (ret == 0)
-                       return 1;
-       }
-
        if (!uptodate) {
-               clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
                ClearPageUptodate(page);
                SetPageError(page);
        }
@@ -2347,10 +2329,23 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
-                       if (ret)
+                       if (ret) {
+                               /* no I/O error was indicated, but software
+                                * detected errors in the block, either checksum
+                                * errors or issues with the contents */
+                               struct btrfs_root *root =
+                                       BTRFS_I(page->mapping->host)->root;
+                               struct btrfs_device *device;
+
                                uptodate = 0;
-                       else
+                               device = btrfs_find_device_for_logical(
+                                               root, start, mirror);
+                               if (device)
+                                       btrfs_dev_stat_inc_and_print(device,
+                                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
+                       } else {
                                clean_io_failure(start, page);
+                       }
                }
 
                if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
@@ -3164,7 +3159,7 @@ static int write_one_eb(struct extent_buffer *eb,
        u64 offset = eb->start;
        unsigned long i, num_pages;
        int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
-       int ret;
+       int ret = 0;
 
        clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        num_pages = num_extent_pages(eb->start, eb->len);
@@ -3329,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
+       struct inode *inode = mapping->host;
        int ret = 0;
        int done = 0;
        int nr_to_write_done = 0;
@@ -3339,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        int scanned = 0;
        int tag;
 
+       /*
+        * We have to hold onto the inode so that ordered extents can do their
+        * work when the IO finishes.  The alternative to this is failing to add
+        * an ordered extent if the igrab() fails there and that is a huge pain
+        * to deal with, so instead just hold onto the inode throughout the
+        * writepages operation.  If it fails here we are freeing up the inode
+        * anyway and we'd rather not waste our time writing out stuff that is
+        * going to be truncated.
+        */
+       if (!igrab(inode))
+               return 0;
+
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
@@ -3433,6 +3441,7 @@ retry:
                index = 0;
                goto retry;
        }
+       btrfs_add_delayed_iput(inode);
        return ret;
 }
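
The igrab()/btrfs_add_delayed_iput() pair added above pins the inode for the whole writepages pass so that ordered-extent completions can still reach it. A toy refcounting sketch of that pin-while-work-is-outstanding idea -- single-threaded, invented names, and without the deferred final drop the real code uses:

/*
 * Sketch only: refuse to start work on an object that is already going
 * away, otherwise hold a reference across the submission and drop it
 * when done.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        int refs;
};

static struct node *grab(struct node *n)
{
        if (!n || n->refs == 0)         /* already being torn down */
                return NULL;
        n->refs++;
        return n;
}

static void put(struct node *n)
{
        if (--n->refs == 0) {
                puts("node freed");
                free(n);
        }
}

static int write_pages(struct node *n)
{
        if (!grab(n))                   /* nothing to do, object is dying */
                return 0;

        puts("submitting pages");       /* completions can still use n here */

        put(n);                         /* the real code defers this drop */
        return 0;
}

int main(void)
{
        struct node *n = malloc(sizeof(*n));

        n->refs = 1;
        write_pages(n);
        put(n);
        return 0;
}
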
 
@@ -3930,6 +3939,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        eb->start = start;
        eb->len = len;
        eb->tree = tree;
+       eb->bflags = 0;
        rwlock_init(&eb->lock);
        atomic_set(&eb->write_locks, 0);
        atomic_set(&eb->read_locks, 0);
@@ -3967,6 +3977,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        return eb;
 }
 
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+       unsigned long i;
+       struct page *p;
+       struct extent_buffer *new;
+       unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+       new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+       if (new == NULL)
+               return NULL;
+
+       for (i = 0; i < num_pages; i++) {
+               p = alloc_page(GFP_ATOMIC);
+               BUG_ON(!p);
+               attach_extent_buffer_page(new, p);
+               WARN_ON(PageDirty(p));
+               SetPageUptodate(p);
+               new->pages[i] = p;
+       }
+
+       copy_extent_buffer(new, src, 0, 0, src->len);
+       set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+       set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+       return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+       struct extent_buffer *eb;
+       unsigned long num_pages = num_extent_pages(0, len);
+       unsigned long i;
+
+       eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+       if (!eb)
+               return NULL;
+
+       for (i = 0; i < num_pages; i++) {
+               eb->pages[i] = alloc_page(GFP_ATOMIC);
+               if (!eb->pages[i])
+                       goto err;
+       }
+       set_extent_buffer_uptodate(eb);
+       btrfs_set_header_nritems(eb, 0);
+       set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+       return eb;
+err:
+       for (i--; i > 0; i--)
+               __free_page(eb->pages[i]);
+       __free_extent_buffer(eb);
+       return NULL;
+}
+
 static int extent_buffer_under_io(struct extent_buffer *eb)
 {
        return (atomic_read(&eb->io_pages) ||
@@ -3981,18 +4045,21 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
                                                unsigned long start_idx)
 {
        unsigned long index;
+       unsigned long num_pages;
        struct page *page;
+       int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
 
        BUG_ON(extent_buffer_under_io(eb));
 
-       index = num_extent_pages(eb->start, eb->len);
+       num_pages = num_extent_pages(eb->start, eb->len);
+       index = start_idx + num_pages;
        if (start_idx >= index)
                return;
 
        do {
                index--;
                page = extent_buffer_page(eb, index);
-               if (page) {
+               if (page && mapped) {
                        spin_lock(&page->mapping->private_lock);
                        /*
                         * We do this since we'll remove the pages after we've
@@ -4017,6 +4084,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
                        }
                        spin_unlock(&page->mapping->private_lock);
 
+               }
+               if (page) {
                        /* One for when we alloced the page */
                        page_cache_release(page);
                }
@@ -4235,14 +4304,18 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 {
        WARN_ON(atomic_read(&eb->refs) == 0);
        if (atomic_dec_and_test(&eb->refs)) {
-               struct extent_io_tree *tree = eb->tree;
+               if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
+                       spin_unlock(&eb->refs_lock);
+               } else {
+                       struct extent_io_tree *tree = eb->tree;
 
-               spin_unlock(&eb->refs_lock);
+                       spin_unlock(&eb->refs_lock);
 
-               spin_lock(&tree->buffer_lock);
-               radix_tree_delete(&tree->buffer,
-                                 eb->start >> PAGE_CACHE_SHIFT);
-               spin_unlock(&tree->buffer_lock);
+                       spin_lock(&tree->buffer_lock);
+                       radix_tree_delete(&tree->buffer,
+                                         eb->start >> PAGE_CACHE_SHIFT);
+                       spin_unlock(&tree->buffer_lock);
+               }
 
                /* Should be safe to release our pages at this point */
                btrfs_release_extent_buffer_page(eb, 0);
@@ -4259,6 +4332,10 @@ void free_extent_buffer(struct extent_buffer *eb)
                return;
 
        spin_lock(&eb->refs_lock);
+       if (atomic_read(&eb->refs) == 2 &&
+           test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+               atomic_dec(&eb->refs);
+
        if (atomic_read(&eb->refs) == 2 &&
            test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
            !extent_buffer_under_io(eb) &&
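
btrfs_clone_extent_buffer() and alloc_dummy_extent_buffer() above build private, page-backed buffers and tag them EXTENT_BUFFER_DUMMY so the release path skips the per-tree radix tree. A standalone sketch of that clone-and-mark-private idea, with an invented page size and struct layout:

/*
 * Sketch only: clone a paged buffer into a private copy that is never
 * inserted into the shared lookup structure, so freeing it only has to
 * release its pages.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define MAX_PAGES 16

struct buffer {
        size_t len;
        int dummy;                      /* not tracked in the shared index */
        char *pages[MAX_PAGES];
};

static size_t num_pages(size_t len)
{
        return (len + PAGE_SIZE - 1) / PAGE_SIZE;
}

static void free_buffer(struct buffer *b)
{
        /* a non-dummy buffer would also be unhooked from the index here */
        for (size_t i = 0; i < num_pages(b->len); i++)
                free(b->pages[i]);
        free(b);
}

static struct buffer *clone_buffer(const struct buffer *src)
{
        struct buffer *new = calloc(1, sizeof(*new));

        if (!new)
                return NULL;
        new->len = src->len;
        new->dummy = 1;                 /* private copy: skip the shared index */
        for (size_t i = 0; i < num_pages(src->len); i++) {
                new->pages[i] = malloc(PAGE_SIZE);
                if (!new->pages[i]) {
                        new->len = i * PAGE_SIZE;   /* free only what we got */
                        free_buffer(new);
                        return NULL;
                }
                memcpy(new->pages[i], src->pages[i], PAGE_SIZE);
        }
        return new;
}

int main(void)
{
        struct buffer *src = calloc(1, sizeof(*src));
        struct buffer *copy;

        src->len = 2 * PAGE_SIZE;
        for (size_t i = 0; i < num_pages(src->len); i++)
                src->pages[i] = calloc(1, PAGE_SIZE);

        copy = clone_buffer(src);
        printf("clone is dummy: %d\n", copy->dummy);

        free_buffer(copy);
        free_buffer(src);
        return 0;
}
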
index b516c3b8dec68d825e380a1930976f34c8a3e1a4..25900af5b15d43e6bdfe0cef7c865ac2aa81bd36 100644 (file)
@@ -39,6 +39,7 @@
 #define EXTENT_BUFFER_STALE 6
 #define EXTENT_BUFFER_WRITEBACK 7
 #define EXTENT_BUFFER_IOERR 8
+#define EXTENT_BUFFER_DUMMY 9
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -75,9 +76,6 @@ struct extent_io_ops {
                              unsigned long bio_flags);
        int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
        int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-       int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
-                                       u64 start, u64 end,
-                                      struct extent_state *state);
        int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
                                    struct extent_state *state, int mirror);
        int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -225,6 +223,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+                         struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -265,6 +265,8 @@ void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                                          u64 start, unsigned long len);
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
                                         u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
index 53bf2d764bbc4f5814db04710d3123d03c3779ba..9aa01ec2138d466f3d1726ccd6291d054c5e5c98 100644 (file)
@@ -65,6 +65,21 @@ struct inode_defrag {
        int cycled;
 };
 
+static int __compare_inode_defrag(struct inode_defrag *defrag1,
+                                 struct inode_defrag *defrag2)
+{
+       if (defrag1->root > defrag2->root)
+               return 1;
+       else if (defrag1->root < defrag2->root)
+               return -1;
+       else if (defrag1->ino > defrag2->ino)
+               return 1;
+       else if (defrag1->ino < defrag2->ino)
+               return -1;
+       else
+               return 0;
+}
+
 /* pop a record for an inode into the defrag tree.  The lock
  * must be held already
  *
@@ -81,15 +96,17 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
+       int ret;
 
        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-               if (defrag->ino < entry->ino)
+               ret = __compare_inode_defrag(defrag, entry);
+               if (ret < 0)
                        p = &parent->rb_left;
-               else if (defrag->ino > entry->ino)
+               else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
@@ -103,7 +120,7 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
                        goto exists;
                }
        }
-       BTRFS_I(inode)->in_defrag = 1;
+       set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;
@@ -131,7 +148,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        if (btrfs_fs_closing(root->fs_info))
                return 0;
 
-       if (BTRFS_I(inode)->in_defrag)
+       if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        if (trans)
@@ -148,7 +165,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        defrag->root = root->root_key.objectid;
 
        spin_lock(&root->fs_info->defrag_inodes_lock);
-       if (!BTRFS_I(inode)->in_defrag)
+       if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
@@ -159,28 +176,35 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 /*
  * must be called with the defrag_inodes lock held
  */
-struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
+struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
+                                            u64 root, u64 ino,
                                             struct rb_node **next)
 {
        struct inode_defrag *entry = NULL;
+       struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
+       int ret;
+
+       tmp.ino = ino;
+       tmp.root = root;
 
        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-               if (ino < entry->ino)
+               ret = __compare_inode_defrag(&tmp, entry);
+               if (ret < 0)
                        p = parent->rb_left;
-               else if (ino > entry->ino)
+               else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }
 
        if (next) {
-               while (parent && ino > entry->ino) {
+               while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
@@ -202,6 +226,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
+       u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;
 
@@ -214,11 +239,14 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
                n = NULL;
 
                /* find an inode to defrag */
-               defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
+               defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
+                                                first_ino, &n);
                if (!defrag) {
-                       if (n)
-                               defrag = rb_entry(n, struct inode_defrag, rb_node);
-                       else if (first_ino) {
+                       if (n) {
+                               defrag = rb_entry(n, struct inode_defrag,
+                                                 rb_node);
+                       } else if (root_objectid || first_ino) {
+                               root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
@@ -228,6 +256,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 
                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
+               root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 
                if (btrfs_fs_closing(fs_info))
@@ -252,7 +281,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
                        goto next;
 
                /* do a chunk of defrag */
-               BTRFS_I(inode)->in_defrag = 0;
+               clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
@@ -1305,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    loff_t *ppos, size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
@@ -1315,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);
 
-       /*
-        * the generic O_DIRECT will update in-memory i_size after the
-        * DIOs are done.  But our endio handlers that update the on
-        * disk i_size never update past the in memory i_size.  So we
-        * need one more update here to catch any additions to the
-        * file
-        */
-       if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-               mark_inode_dirty(inode);
-       }
-
        if (written < 0 || written == count)
                return written;
 
@@ -1404,12 +1420,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       err = btrfs_update_time(file);
+       err = file_update_time(file);
        if (err) {
                mutex_unlock(&inode->i_mutex);
                goto out;
        }
-       BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
@@ -1466,8 +1481,8 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
         * flush down new bytes that may have been written if the
         * application were using truncate to replace a file in place.
         */
-       if (BTRFS_I(inode)->ordered_data_close) {
-               BTRFS_I(inode)->ordered_data_close = 0;
+       if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                              &BTRFS_I(inode)->runtime_flags)) {
                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
@@ -1498,14 +1513,15 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        trace_btrfs_sync_file(file, datasync);
 
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (ret)
-               return ret;
        mutex_lock(&inode->i_mutex);
 
-       /* we wait first, since the writeback may change the inode */
+       /*
+        * we wait first, since the writeback may change the inode; also,
+        * btrfs_wait_ordered_range does a filemap_write_and_wait_range, which
+        * is why we don't do it above like other file systems.
+        */
        root->log_batch++;
-       btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       btrfs_wait_ordered_range(inode, start, end);
        root->log_batch++;
 
        /*
@@ -1523,7 +1539,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * syncing
         */
        smp_mb();
-       if (BTRFS_I(inode)->last_trans <=
+       if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+           BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&inode->i_mutex);
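
The defrag tree above is now keyed by (root, ino) through __compare_inode_defrag(), so a single rbtree can hold inodes from every subvolume. The same two-level ordering, sketched with qsort()/bsearch() over an array instead of an rbtree (names invented):

/*
 * Sketch only: compare by root first, inode number second, and avoid
 * subtraction so large u64 differences cannot overflow the int result.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct defrag_key {
        uint64_t root;
        uint64_t ino;
};

static int compare_defrag(const void *a, const void *b)
{
        const struct defrag_key *d1 = a, *d2 = b;

        if (d1->root != d2->root)
                return d1->root > d2->root ? 1 : -1;
        if (d1->ino != d2->ino)
                return d1->ino > d2->ino ? 1 : -1;
        return 0;
}

int main(void)
{
        struct defrag_key keys[] = {
                { .root = 5, .ino = 260 },
                { .root = 5, .ino = 258 },
                { .root = 7, .ino = 256 },
        };
        struct defrag_key needle = { .root = 5, .ino = 260 };
        struct defrag_key *hit;

        qsort(keys, 3, sizeof(keys[0]), compare_defrag);
        hit = bsearch(&needle, keys, 3, sizeof(keys[0]), compare_defrag);

        if (hit)
                printf("found root %llu ino %llu\n",
                       (unsigned long long)hit->root,
                       (unsigned long long)hit->ino);
        return 0;
}
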
index 202008ec367d4c4c2cfcf73f7289692dd910b25c..6c4e2baa9290e16d06a4a8dfc5c3b05880cfeb27 100644 (file)
@@ -33,6 +33,8 @@
 
 static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info);
 
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
@@ -75,7 +77,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                return ERR_PTR(-ENOENT);
        }
 
-       inode->i_mapping->flags &= ~__GFP_FS;
+       mapping_set_gfp_mask(inode->i_mapping,
+                       mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 
        return inode;
 }
@@ -365,7 +368,7 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 
 static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
 {
-       u64 *val;
+       __le64 *val;
 
        io_ctl_map_page(io_ctl, 1);
 
@@ -388,7 +391,7 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
 
 static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
 {
-       u64 *gen;
+       __le64 *gen;
 
        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
@@ -584,6 +587,44 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
        return 0;
 }
 
+/*
+ * Since we attach pinned extents after the fact we can have contiguous sections
+ * of free space that are split up in entries.  This poses a problem with the
+ * tree logging stuff since it could have allocated across what appears to be 2
+ * entries, as we would have merged the entries when adding the pinned extents
+ * back to the free space cache.  So run through the space cache that we just
+ * loaded and merge contiguous entries.  This will make the log replay stuff not
+ * blow up and it will make for nicer allocator behavior.
+ */
+static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
+{
+       struct btrfs_free_space *e, *prev = NULL;
+       struct rb_node *n;
+
+again:
+       spin_lock(&ctl->tree_lock);
+       for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
+               e = rb_entry(n, struct btrfs_free_space, offset_index);
+               if (!prev)
+                       goto next;
+               if (e->bitmap || prev->bitmap)
+                       goto next;
+               if (prev->offset + prev->bytes == e->offset) {
+                       unlink_free_space(ctl, prev);
+                       unlink_free_space(ctl, e);
+                       prev->bytes += e->bytes;
+                       kmem_cache_free(btrfs_free_space_cachep, e);
+                       link_free_space(ctl, prev);
+                       prev = NULL;
+                       spin_unlock(&ctl->tree_lock);
+                       goto again;
+               }
+next:
+               prev = e;
+       }
+       spin_unlock(&ctl->tree_lock);
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_path *path, u64 offset)
@@ -726,6 +767,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        }
 
        io_ctl_drop_pages(&io_ctl);
+       merge_space_tree(ctl);
        ret = 1;
 out:
        io_ctl_free(&io_ctl);
@@ -972,9 +1014,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                goto out;
 
 
-       ret = filemap_write_and_wait(inode->i_mapping);
-       if (ret)
-               goto out;
+       btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
@@ -1503,29 +1543,26 @@ again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
-        * XXX - this can go away after a few releases.
-        *
-        * since the only user of btrfs_remove_free_space is the tree logging
-        * stuff, and the only way to test that is under crash conditions, we
-        * want to have this debug stuff here just in case somethings not
-        * working.  Search the bitmap for the space we are trying to use to
-        * make sure its actually there.  If its not there then we need to stop
-        * because something has gone wrong.
+        * We need to search for bits in this bitmap.  We could only cover some
+        * of the extent in this bitmap thanks to how we add space, so we need
+        * to search for as much of it as we can and clear that amount, and then
+        * go searching for the next bit.
         */
        search_start = *offset;
-       search_bytes = *bytes;
+       search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
-       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-               *bytes -= end - *offset + 1;
-               *offset = end + 1;
-       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-               *bytes = 0;
-       }
+       /* We may have found more bits than what we need */
+       search_bytes = min(search_bytes, *bytes);
+
+       /* Cannot clear past the end of the bitmap */
+       search_bytes = min(search_bytes, end - search_start + 1);
+
+       bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+       *offset += search_bytes;
+       *bytes -= search_bytes;
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1556,7 +1593,7 @@ again:
                 * everything over again.
                 */
                search_start = *offset;
-               search_bytes = *bytes;
+               search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
@@ -1839,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       if (!bytes)
+               goto out_lock;
+
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
@@ -1865,88 +1904,48 @@ again:
                }
        }
 
-       if (info->bytes < bytes && rb_next(&info->offset_index)) {
-               u64 end;
-               next_info = rb_entry(rb_next(&info->offset_index),
-                                            struct btrfs_free_space,
-                                            offset_index);
-
-               if (next_info->bitmap)
-                       end = next_info->offset +
-                             BITS_PER_BITMAP * ctl->unit - 1;
-               else
-                       end = next_info->offset + next_info->bytes;
-
-               if (next_info->bytes < bytes ||
-                   next_info->offset > offset || offset > end) {
-                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
-                             " trying to use %llu\n",
-                             (unsigned long long)info->offset,
-                             (unsigned long long)info->bytes,
-                             (unsigned long long)bytes);
-                       WARN_ON(1);
-                       ret = -EINVAL;
-                       goto out_lock;
-               }
-
-               info = next_info;
-       }
-
-       if (info->bytes == bytes) {
-               unlink_free_space(ctl, info);
-               if (info->bitmap) {
-                       kfree(info->bitmap);
-                       ctl->total_bitmaps--;
-               }
-               kmem_cache_free(btrfs_free_space_cachep, info);
-               ret = 0;
-               goto out_lock;
-       }
-
-       if (!info->bitmap && info->offset == offset) {
+       if (!info->bitmap) {
                unlink_free_space(ctl, info);
-               info->offset += bytes;
-               info->bytes -= bytes;
-               ret = link_free_space(ctl, info);
-               WARN_ON(ret);
-               goto out_lock;
-       }
+               if (offset == info->offset) {
+                       u64 to_free = min(bytes, info->bytes);
+
+                       info->bytes -= to_free;
+                       info->offset += to_free;
+                       if (info->bytes) {
+                               ret = link_free_space(ctl, info);
+                               WARN_ON(ret);
+                       } else {
+                               kmem_cache_free(btrfs_free_space_cachep, info);
+                       }
 
-       if (!info->bitmap && info->offset <= offset &&
-           info->offset + info->bytes >= offset + bytes) {
-               u64 old_start = info->offset;
-               /*
-                * we're freeing space in the middle of the info,
-                * this can happen during tree log replay
-                *
-                * first unlink the old info and then
-                * insert it again after the hole we're creating
-                */
-               unlink_free_space(ctl, info);
-               if (offset + bytes < info->offset + info->bytes) {
-                       u64 old_end = info->offset + info->bytes;
+                       offset += to_free;
+                       bytes -= to_free;
+                       goto again;
+               } else {
+                       u64 old_end = info->bytes + info->offset;
 
-                       info->offset = offset + bytes;
-                       info->bytes = old_end - info->offset;
+                       info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
-               } else {
-                       /* the hole we're creating ends at the end
-                        * of the info struct, just free the info
-                        */
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               }
-               spin_unlock(&ctl->tree_lock);
 
-               /* step two, insert a new info struct to cover
-                * anything before the hole
-                */
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
-               WARN_ON(ret); /* -ENOMEM */
-               goto out;
+                       /* Not enough bytes in this entry to satisfy us */
+                       if (old_end < offset + bytes) {
+                               bytes -= old_end - offset;
+                               offset = old_end;
+                               goto again;
+                       } else if (old_end == offset + bytes) {
+                               /* all done */
+                               goto out_lock;
+                       }
+                       spin_unlock(&ctl->tree_lock);
+
+                       ret = btrfs_add_free_space(block_group, offset + bytes,
+                                                  old_end - (offset + bytes));
+                       WARN_ON(ret);
+                       goto out;
+               }
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
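The rewritten btrfs_remove_free_space() path above trims or splits whichever extent entry covers the requested offset and loops (the "goto again" cases) until the whole range is gone, falling through to the bitmap path for anything left. Below is a minimal standalone sketch of just the trim/split arithmetic for a single entry; the struct and helper names are hypothetical and none of the btrfs linking, locking or bitmap handling is shown.

#include <stdio.h>

struct entry { unsigned long long offset, bytes; };

/*
 * Remove [offset, offset + bytes) from one free-space entry and report
 * what survives: nothing, a front piece, a tail piece, or both.
 * Returns the number of surviving pieces written into out[].
 */
static int remove_range(struct entry e, unsigned long long offset,
                        unsigned long long bytes, struct entry out[2])
{
        unsigned long long end = e.offset + e.bytes;
        int n = 0;

        if (e.offset < offset)          /* keep the piece in front of the hole */
                out[n++] = (struct entry){ e.offset, offset - e.offset };
        if (offset + bytes < end)       /* keep the piece behind the hole */
                out[n++] = (struct entry){ offset + bytes, end - (offset + bytes) };
        return n;
}

int main(void)
{
        struct entry out[2];
        int i, n = remove_range((struct entry){ 0, 4096 }, 1024, 1024, out);

        for (i = 0; i < n; i++)
                printf("surviving entry: offset=%llu bytes=%llu\n",
                       out[i].offset, out[i].bytes);
        return 0;
}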
index ceb7b9c9edcc1436693178fd6d2ff62f2334ada7..a7d1921ac76b8ee5d85d563c1ceed80f4a65a2dd 100644 (file)
@@ -89,7 +89,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize);
 static int btrfs_truncate(struct inode *inode);
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
@@ -257,10 +257,13 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
-       if (ret) {
+       if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                return ret;
+       } else if (ret == -ENOSPC) {
+               return 1;
        }
+
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
@@ -827,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode,
        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode,
                             &BTRFS_I(inode)->io_tree,
-                            start, end, NULL,
+                            start, end, locked_page,
                             EXTENT_CLEAR_UNLOCK_PAGE |
                             EXTENT_CLEAR_UNLOCK |
                             EXTENT_CLEAR_DELALLOC |
@@ -960,7 +963,7 @@ out:
 out_unlock:
        extent_clear_unlock_delalloc(inode,
                     &BTRFS_I(inode)->io_tree,
-                    start, end, NULL,
+                    start, end, locked_page,
                     EXTENT_CLEAR_UNLOCK_PAGE |
                     EXTENT_CLEAR_UNLOCK |
                     EXTENT_CLEAR_DELALLOC |
@@ -983,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work)
        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
-       if (num_added == 0)
+       if (num_added == 0) {
+               btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
+       }
 }
 
 /*
@@ -1017,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
 {
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
+       if (async_cow->inode)
+               btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
 }
 
@@ -1035,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
-               async_cow->inode = inode;
+               async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;
@@ -1133,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            start, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
                return -ENOMEM;
+       }
 
        nolock = btrfs_is_free_space_inode(root, inode);
 
@@ -1144,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                trans = btrfs_join_transaction(root);
 
        if (IS_ERR(trans)) {
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            start, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
@@ -1324,8 +1350,11 @@ out_check:
        }
        btrfs_release_path(path);
 
-       if (cur_offset <= end && cow_start == (u64)-1)
+       if (cur_offset <= end && cow_start == (u64)-1) {
                cow_start = cur_offset;
+               cur_offset = end;
+       }
+
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
@@ -1344,6 +1373,17 @@ error:
        if (!ret)
                ret = err;
 
+       if (ret && cur_offset < end)
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            cur_offset, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
+
        btrfs_free_path(path);
        return ret;
 }
@@ -1358,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
-       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
-       else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
+       } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
-       else if (!btrfs_test_opt(root, COMPRESS) &&
-                !(BTRFS_I(inode)->force_compress) &&
-                !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
+       } else if (!btrfs_test_opt(root, COMPRESS) &&
+                  !(BTRFS_I(inode)->force_compress) &&
+                  !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
                ret = cow_file_range(inode, locked_page, start, end,
                                      page_started, nr_written, 1);
-       else
+       } else {
+               set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                       &BTRFS_I(inode)->runtime_flags);
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
+       }
        return ret;
 }
 
@@ -1572,11 +1615,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        if (btrfs_is_free_space_inode(root, inode))
                metadata = 2;
 
-       ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
-       if (ret)
-               return ret;
-
        if (!(rw & REQ_WRITE)) {
+               ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+               if (ret)
+                       return ret;
+
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
@@ -1815,25 +1858,24 @@ out:
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
  */
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 {
+       struct inode *inode = ordered_extent->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans = NULL;
-       struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_state *cached_state = NULL;
        int compress_type = 0;
        int ret;
        bool nolock;
 
-       ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
-                                            end - start + 1);
-       if (!ret)
-               return 0;
-       BUG_ON(!ordered_extent); /* Logic error */
-
        nolock = btrfs_is_free_space_inode(root, inode);
 
+       if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+               ret = -EIO;
+               goto out;
+       }
+
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
                ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -1889,12 +1931,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                                   ordered_extent->file_offset,
                                   ordered_extent->len);
        }
-       unlock_extent_cached(io_tree, ordered_extent->file_offset,
-                            ordered_extent->file_offset +
-                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
+
        if (ret < 0) {
                btrfs_abort_transaction(trans, root, ret);
-               goto out;
+               goto out_unlock;
        }
 
        add_pending_csums(trans, inode, ordered_extent->file_offset,
@@ -1905,10 +1945,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                ret = btrfs_update_inode_fallback(trans, root, inode);
                if (ret) { /* -ENOMEM or corruption */
                        btrfs_abort_transaction(trans, root, ret);
-                       goto out;
+                       goto out_unlock;
                }
        }
        ret = 0;
+out_unlock:
+       unlock_extent_cached(io_tree, ordered_extent->file_offset,
+                            ordered_extent->file_offset +
+                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
 out:
        if (root != root->fs_info->tree_root)
                btrfs_delalloc_release_metadata(inode, ordered_extent->len);
@@ -1919,26 +1963,57 @@ out:
                        btrfs_end_transaction(trans, root);
        }
 
+       if (ret)
+               clear_extent_uptodate(io_tree, ordered_extent->file_offset,
+                                     ordered_extent->file_offset +
+                                     ordered_extent->len - 1, NULL, GFP_NOFS);
+
+       /*
+        * This needs to be done to make sure anybody waiting knows we are
+        * done updating everything for this ordered extent.
+        */
+       btrfs_remove_ordered_extent(inode, ordered_extent);
+
        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
        btrfs_put_ordered_extent(ordered_extent);
 
-       return 0;
-out_unlock:
-       unlock_extent_cached(io_tree, ordered_extent->file_offset,
-                            ordered_extent->file_offset +
-                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
-       goto out;
+       return ret;
+}
+
+static void finish_ordered_fn(struct btrfs_work *work)
+{
+       struct btrfs_ordered_extent *ordered_extent;
+       ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
+       btrfs_finish_ordered_io(ordered_extent);
 }
 
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                struct extent_state *state, int uptodate)
 {
+       struct inode *inode = page->mapping->host;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_ordered_extent *ordered_extent = NULL;
+       struct btrfs_workers *workers;
+
        trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
        ClearPagePrivate2(page);
-       return btrfs_finish_ordered_io(page->mapping->host, start, end);
+       if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+                                           end - start + 1, uptodate))
+               return 0;
+
+       ordered_extent->work.func = finish_ordered_fn;
+       ordered_extent->work.flags = 0;
+
+       if (btrfs_is_free_space_inode(root, inode))
+               workers = &root->fs_info->endio_freespace_worker;
+       else
+               workers = &root->fs_info->endio_write_workers;
+       btrfs_queue_worker(workers, &ordered_extent->work);
+
+       return 0;
 }
 
 /*
@@ -2072,12 +2147,12 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *block_rsv;
        int ret;
 
-       if (!list_empty(&root->orphan_list) ||
+       if (atomic_read(&root->orphan_inodes) ||
            root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
                return;
 
        spin_lock(&root->orphan_lock);
-       if (!list_empty(&root->orphan_list)) {
+       if (atomic_read(&root->orphan_inodes)) {
                spin_unlock(&root->orphan_lock);
                return;
        }
@@ -2134,8 +2209,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
                block_rsv = NULL;
        }
 
-       if (list_empty(&BTRFS_I(inode)->i_orphan)) {
-               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+       if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                             &BTRFS_I(inode)->runtime_flags)) {
 #if 0
                /*
                 * For proper ENOSPC handling, we should do orphan
@@ -2148,12 +2223,12 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
                        insert = 1;
 #endif
                insert = 1;
+               atomic_dec(&root->orphan_inodes);
        }
 
-       if (!BTRFS_I(inode)->orphan_meta_reserved) {
-               BTRFS_I(inode)->orphan_meta_reserved = 1;
+       if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                             &BTRFS_I(inode)->runtime_flags))
                reserve = 1;
-       }
        spin_unlock(&root->orphan_lock);
 
        /* grab metadata reservation from transaction handle */
@@ -2166,6 +2241,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
                if (ret && ret != -EEXIST) {
+                       clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                 &BTRFS_I(inode)->runtime_flags);
                        btrfs_abort_transaction(trans, root, ret);
                        return ret;
                }
@@ -2196,15 +2273,13 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
        int ret = 0;
 
        spin_lock(&root->orphan_lock);
-       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               list_del_init(&BTRFS_I(inode)->i_orphan);
+       if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                              &BTRFS_I(inode)->runtime_flags))
                delete_item = 1;
-       }
 
-       if (BTRFS_I(inode)->orphan_meta_reserved) {
-               BTRFS_I(inode)->orphan_meta_reserved = 0;
+       if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                              &BTRFS_I(inode)->runtime_flags))
                release_rsv = 1;
-       }
        spin_unlock(&root->orphan_lock);
 
        if (trans && delete_item) {
@@ -2212,8 +2287,10 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
                BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
        }
 
-       if (release_rsv)
+       if (release_rsv) {
                btrfs_orphan_release_metadata(inode);
+               atomic_dec(&root->orphan_inodes);
+       }
 
        return 0;
 }
@@ -2341,6 +2418,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                ret = PTR_ERR(trans);
                                goto out;
                        }
+                       printk(KERN_ERR "auto deleting %Lu\n",
+                              found_key.objectid);
                        ret = btrfs_del_orphan_item(trans, root,
                                                    found_key.objectid);
                        BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
@@ -2352,9 +2431,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * add this inode to the orphan list so btrfs_orphan_del does
                 * the proper thing when we hit it
                 */
-               spin_lock(&root->orphan_lock);
-               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
-               spin_unlock(&root->orphan_lock);
+               set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                       &BTRFS_I(inode)->runtime_flags);
 
                /* if we have links, this was a truncate, lets do that */
                if (inode->i_nlink) {
@@ -2510,7 +2588,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
        inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
        BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
-       BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
+       inode->i_version = btrfs_inode_sequence(leaf, inode_item);
        inode->i_generation = BTRFS_I(inode)->generation;
        inode->i_rdev = 0;
        rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -2594,7 +2672,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 
        btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
        btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
-       btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
+       btrfs_set_inode_sequence(leaf, item, inode->i_version);
        btrfs_set_inode_transid(leaf, item, trans->transid);
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
@@ -2752,6 +2830,8 @@ err:
                goto out;
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+       inode_inc_iversion(inode);
+       inode_inc_iversion(dir);
        inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        btrfs_update_inode(trans, root, dir);
 out:
@@ -3089,6 +3169,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        }
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+       inode_inc_iversion(dir);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, dir);
        if (ret)
@@ -3607,7 +3688,8 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                 * any new writes get down to disk quickly.
                 */
                if (newsize == 0)
-                       BTRFS_I(inode)->ordered_data_close = 1;
+                       set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                               &BTRFS_I(inode)->runtime_flags);
 
                /* we don't support swapfiles, so vmtruncate shouldn't fail */
                truncate_setsize(inode, newsize);
@@ -3638,6 +3720,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
+               inode_inc_iversion(inode);
                err = btrfs_dirty_inode(inode);
 
                if (!err && attr->ia_valid & ATTR_MODE)
@@ -3671,7 +3754,8 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
+               BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
 
@@ -4066,7 +4150,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
        BTRFS_I(inode)->root = root;
        memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
-       BTRFS_I(inode)->dummy_inode = 1;
+       set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
 
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
        inode->i_op = &btrfs_dir_ro_inode_operations;
@@ -4370,7 +4454,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        int ret = 0;
        bool nolock = false;
 
-       if (BTRFS_I(inode)->dummy_inode)
+       if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
@@ -4403,7 +4487,7 @@ int btrfs_dirty_inode(struct inode *inode)
        struct btrfs_trans_handle *trans;
        int ret;
 
-       if (BTRFS_I(inode)->dummy_inode)
+       if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        trans = btrfs_join_transaction(root);
@@ -4431,46 +4515,18 @@ int btrfs_dirty_inode(struct inode *inode)
  * This is a copy of file_update_time.  We need this so we can return error on
  * ENOSPC for updating the inode in the case of file write and mmap writes.
  */
-int btrfs_update_time(struct file *file)
+static int btrfs_update_time(struct inode *inode, struct timespec *now,
+                            int flags)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       struct timespec now;
-       int ret;
-       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
-
-       /* First try to exhaust all avenues to not sync */
-       if (IS_NOCMTIME(inode))
-               return 0;
-
-       now = current_fs_time(inode->i_sb);
-       if (!timespec_equal(&inode->i_mtime, &now))
-               sync_it = S_MTIME;
-
-       if (!timespec_equal(&inode->i_ctime, &now))
-               sync_it |= S_CTIME;
-
-       if (IS_I_VERSION(inode))
-               sync_it |= S_VERSION;
-
-       if (!sync_it)
-               return 0;
-
-       /* Finally allowed to write? Takes lock. */
-       if (mnt_want_write_file(file))
-               return 0;
-
-       /* Only change inode inside the lock region */
-       if (sync_it & S_VERSION)
+       if (flags & S_VERSION)
                inode_inc_iversion(inode);
-       if (sync_it & S_CTIME)
-               inode->i_ctime = now;
-       if (sync_it & S_MTIME)
-               inode->i_mtime = now;
-       ret = btrfs_dirty_inode(inode);
-       if (!ret)
-               mark_inode_dirty_sync(inode);
-       mnt_drop_write(file->f_path.mnt);
-       return ret;
+       if (flags & S_CTIME)
+               inode->i_ctime = *now;
+       if (flags & S_MTIME)
+               inode->i_mtime = *now;
+       if (flags & S_ATIME)
+               inode->i_atime = *now;
+       return btrfs_dirty_inode(inode);
 }
 
 /*
@@ -4730,6 +4786,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 
        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                           name_len * 2);
+       inode_inc_iversion(parent_inode);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, parent_inode);
        if (ret)
@@ -4937,6 +4994,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        btrfs_inc_nlink(inode);
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ihold(inode);
 
@@ -5818,8 +5876,17 @@ map:
        bh_result->b_size = len;
        bh_result->b_bdev = em->bdev;
        set_buffer_mapped(bh_result);
-       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-               set_buffer_new(bh_result);
+       if (create) {
+               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       set_buffer_new(bh_result);
+
+               /*
+                * Need to update the i_size under the extent lock so buffered
+                * readers will get the updated i_size when we unlock.
+                */
+               if (start + len > i_size_read(inode))
+                       i_size_write(inode, start + len);
+       }
 
        free_extent_map(em);
 
@@ -5903,9 +5970,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
        struct btrfs_dio_private *dip = bio->bi_private;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered = NULL;
-       struct extent_state *cached_state = NULL;
        u64 ordered_offset = dip->logical_offset;
        u64 ordered_bytes = dip->bytes;
        int ret;
@@ -5915,73 +5980,14 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 again:
        ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                   &ordered_offset,
-                                                  ordered_bytes);
+                                                  ordered_bytes, !err);
        if (!ret)
                goto out_test;
 
-       BUG_ON(!ordered);
-
-       trans = btrfs_join_transaction(root);
-       if (IS_ERR(trans)) {
-               err = -ENOMEM;
-               goto out;
-       }
-       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-
-       if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
-               ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-               if (!ret)
-                       err = btrfs_update_inode_fallback(trans, root, inode);
-               goto out;
-       }
-
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-                        ordered->file_offset + ordered->len - 1, 0,
-                        &cached_state);
-
-       if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
-               ret = btrfs_mark_extent_written(trans, inode,
-                                               ordered->file_offset,
-                                               ordered->file_offset +
-                                               ordered->len);
-               if (ret) {
-                       err = ret;
-                       goto out_unlock;
-               }
-       } else {
-               ret = insert_reserved_file_extent(trans, inode,
-                                                 ordered->file_offset,
-                                                 ordered->start,
-                                                 ordered->disk_len,
-                                                 ordered->len,
-                                                 ordered->len,
-                                                 0, 0, 0,
-                                                 BTRFS_FILE_EXTENT_REG);
-               unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
-                                  ordered->file_offset, ordered->len);
-               if (ret) {
-                       err = ret;
-                       WARN_ON(1);
-                       goto out_unlock;
-               }
-       }
-
-       add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-       ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-               btrfs_update_inode_fallback(trans, root, inode);
-       ret = 0;
-out_unlock:
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-                            ordered->file_offset + ordered->len - 1,
-                            &cached_state, GFP_NOFS);
-out:
-       btrfs_delalloc_release_metadata(inode, ordered->len);
-       btrfs_end_transaction(trans, root);
-       ordered_offset = ordered->file_offset + ordered->len;
-       btrfs_put_ordered_extent(ordered);
-       btrfs_put_ordered_extent(ordered);
-
+       ordered->work.func = finish_ordered_fn;
+       ordered->work.flags = 0;
+       btrfs_queue_worker(&root->fs_info->endio_write_workers,
+                          &ordered->work);
 out_test:
        /*
         * our bio might span multiple ordered extents.  If we haven't
@@ -5990,12 +5996,12 @@ out_test:
        if (ordered_offset < dip->logical_offset + dip->bytes) {
                ordered_bytes = dip->logical_offset + dip->bytes -
                        ordered_offset;
+               ordered = NULL;
                goto again;
        }
 out_done:
        bio->bi_private = dip->private;
 
-       kfree(dip->csums);
        kfree(dip);
 
        /* If we had an error make sure to clear the uptodate flag */
@@ -6063,9 +6069,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        int ret;
 
        bio_get(bio);
-       ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-       if (ret)
-               goto err;
+
+       if (!write) {
+               ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+               if (ret)
+                       goto err;
+       }
 
        if (skip_sum)
                goto map;
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 */
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
-               if (!ordered)
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either, we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   cached_state)))
                        break;
+
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                                     &cached_state, GFP_NOFS);
-               btrfs_start_ordered_extent(inode, ordered, 1);
-               btrfs_put_ordered_extent(ordered);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Screw you mmap */
+                       ret = filemap_write_and_wait_range(file->f_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(file->f_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret) {
+                               if (ret == -EBUSY)
+                                       ret = 0;
+                               goto out;
+                       }
+               }
+
                cond_resched();
        }
 
@@ -6485,13 +6530,13 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 
 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 {
+       struct inode *inode = page->mapping->host;
        struct extent_io_tree *tree;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-
        /*
         * we have the page locked, so new writeback can't start,
         * and the dirty bit won't be cleared while we are here.
@@ -6501,13 +6546,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
         */
        wait_on_page_writeback(page);
 
-       tree = &BTRFS_I(page->mapping->host)->io_tree;
+       tree = &BTRFS_I(inode)->io_tree;
        if (offset) {
                btrfs_releasepage(page, GFP_NOFS);
                return;
        }
        lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-       ordered = btrfs_lookup_ordered_extent(page->mapping->host,
+       ordered = btrfs_lookup_ordered_extent(inode,
                                           page_offset(page));
        if (ordered) {
                /*
@@ -6522,9 +6567,10 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
                 * whoever cleared the private bit is responsible
                 * for the finish_ordered_io
                 */
-               if (TestClearPagePrivate2(page)) {
-                       btrfs_finish_ordered_io(page->mapping->host,
-                                               page_start, page_end);
+               if (TestClearPagePrivate2(page) &&
+                   btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
+                                                  PAGE_CACHE_SIZE, 1)) {
+                       btrfs_finish_ordered_io(ordered);
                }
                btrfs_put_ordered_extent(ordered);
                cached_state = NULL;
@@ -6576,7 +6622,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
        if (!ret) {
-               ret = btrfs_update_time(vma->vm_file);
+               ret = file_update_time(vma->vm_file);
                reserved = 1;
        }
        if (ret) {
@@ -6771,7 +6817,8 @@ static int btrfs_truncate(struct inode *inode)
         * using truncate to replace the contents of the file will
         * end up with a zero length file after a crash.
         */
-       if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
+       if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                                          &BTRFS_I(inode)->runtime_flags))
                btrfs_add_ordered_operation(trans, root, inode);
 
        while (1) {
@@ -6894,7 +6941,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->root = NULL;
        ei->space_info = NULL;
        ei->generation = 0;
-       ei->sequence = 0;
        ei->last_trans = 0;
        ei->last_sub_trans = 0;
        ei->logged_trans = 0;
@@ -6909,11 +6955,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->outstanding_extents = 0;
        ei->reserved_extents = 0;
 
-       ei->ordered_data_close = 0;
-       ei->orphan_meta_reserved = 0;
-       ei->dummy_inode = 0;
-       ei->in_defrag = 0;
-       ei->delalloc_meta_reserved = 0;
+       ei->runtime_flags = 0;
        ei->force_compress = BTRFS_COMPRESS_NONE;
 
        ei->delayed_node = NULL;
@@ -6927,7 +6969,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        mutex_init(&ei->log_mutex);
        mutex_init(&ei->delalloc_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
-       INIT_LIST_HEAD(&ei->i_orphan);
        INIT_LIST_HEAD(&ei->delalloc_inodes);
        INIT_LIST_HEAD(&ei->ordered_operations);
        RB_CLEAR_NODE(&ei->rb_node);
@@ -6972,13 +7013,12 @@ void btrfs_destroy_inode(struct inode *inode)
                spin_unlock(&root->fs_info->ordered_extent_lock);
        }
 
-       spin_lock(&root->orphan_lock);
-       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+       if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                    &BTRFS_I(inode)->runtime_flags)) {
                printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
                       (unsigned long long)btrfs_ino(inode));
-               list_del_init(&BTRFS_I(inode)->i_orphan);
+               atomic_dec(&root->orphan_inodes);
        }
-       spin_unlock(&root->orphan_lock);
 
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -7099,10 +7139,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode)
        else
                b_inode->flags &= ~BTRFS_INODE_NODATACOW;
 
-       if (b_dir->flags & BTRFS_INODE_COMPRESS)
+       if (b_dir->flags & BTRFS_INODE_COMPRESS) {
                b_inode->flags |= BTRFS_INODE_COMPRESS;
-       else
-               b_inode->flags &= ~BTRFS_INODE_COMPRESS;
+               b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+       } else {
+               b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
+                                   BTRFS_INODE_NOCOMPRESS);
+       }
 }
 
 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -7193,6 +7236,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
                btrfs_add_ordered_operation(trans, root, old_inode);
 
+       inode_inc_iversion(old_dir);
+       inode_inc_iversion(new_dir);
+       inode_inc_iversion(old_inode);
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
        old_inode->i_ctime = ctime;
@@ -7219,6 +7265,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
 
        if (new_inode) {
+               inode_inc_iversion(new_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (unlikely(btrfs_ino(new_inode) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -7490,6 +7537,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                cur_offset += ins.offset;
                *alloc_hint = ins.objectid + ins.offset;
 
+               inode_inc_iversion(inode);
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -7647,6 +7695,7 @@ static const struct inode_operations btrfs_file_inode_operations = {
        .permission     = btrfs_permission,
        .fiemap         = btrfs_fiemap,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
@@ -7657,6 +7706,7 @@ static const struct inode_operations btrfs_special_inode_operations = {
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_symlink_inode_operations = {
        .readlink       = generic_readlink,
@@ -7670,6 +7720,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 
 const struct dentry_operations btrfs_dentry_operations = {
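The inode.c changes above move ordered-extent completion off the end-io path: btrfs_writepage_end_io_hook() now only decrements the pending count and, once the extent is fully written, queues finish_ordered_fn() on the end-io write workers, and the worker recovers the ordered extent from the embedded work item with container_of(). A userspace-compilable sketch of that embed-and-recover pattern follows; the struct work and queuing here are simplified stand-ins, not the kernel's btrfs_work or btrfs_queue_worker().

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
        void (*func)(struct work *work);
};

struct ordered_extent {
        unsigned long long file_offset;
        struct work work;               /* embedded work item */
};

/* The worker only sees the work pointer and recovers its container. */
static void finish_ordered_fn(struct work *work)
{
        struct ordered_extent *oe =
                container_of(work, struct ordered_extent, work);

        printf("finishing ordered extent at offset %llu\n", oe->file_offset);
}

int main(void)
{
        struct ordered_extent oe = { .file_offset = 4096 };

        oe.work.func = finish_ordered_fn;
        /* the kernel queues this to a btrfs worker; run it directly here */
        oe.work.func(&oe.work);
        return 0;
}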
index 14f8e1faa46ee0478ebb83d6f82d205d25c1dc51..0e92e5763005214b5d55d091d6fcfb94d73f4875 100644 (file)
@@ -52,6 +52,7 @@
 #include "locking.h"
 #include "inode-map.h"
 #include "backref.h"
+#include "rcu-string.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -261,6 +262,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        }
 
        btrfs_update_iflags(inode);
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
 
@@ -367,7 +369,7 @@ static noinline int create_subvol(struct btrfs_root *root,
                return PTR_ERR(trans);
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-                                     0, objectid, NULL, 0, 0, 0, 0);
+                                     0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                goto fail;
@@ -784,39 +786,57 @@ none:
        return -ENOENT;
 }
 
-/*
- * Validaty check of prev em and next em:
- * 1) no prev/next em
- * 2) prev/next em is an hole/inline extent
- */
-static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 {
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-       struct extent_map *prev = NULL, *next = NULL;
-       int ret = 0;
+       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       struct extent_map *em;
+       u64 len = PAGE_CACHE_SIZE;
 
+       /*
+        * hopefully we have this extent in the tree already, try without
+        * the full extent lock
+        */
        read_lock(&em_tree->lock);
-       prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
-       next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+       em = lookup_extent_mapping(em_tree, start, len);
        read_unlock(&em_tree->lock);
 
-       if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
-           (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
-               ret = 1;
-       free_extent_map(prev);
-       free_extent_map(next);
+       if (!em) {
+               /* get the big lock and read metadata off disk */
+               lock_extent(io_tree, start, start + len - 1);
+               em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+               unlock_extent(io_tree, start, start + len - 1);
+
+               if (IS_ERR(em))
+                       return NULL;
+       }
+
+       return em;
+}
 
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
+{
+       struct extent_map *next;
+       bool ret = true;
+
+       /* this is the last extent */
+       if (em->start + em->len >= i_size_read(inode))
+               return false;
+
+       next = defrag_lookup_extent(inode, em->start + em->len);
+       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+               ret = false;
+
+       free_extent_map(next);
        return ret;
 }
 
-static int should_defrag_range(struct inode *inode, u64 start, u64 len,
-                              int thresh, u64 *last_len, u64 *skip,
-                              u64 *defrag_end)
+static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+                              u64 *last_len, u64 *skip, u64 *defrag_end)
 {
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       struct extent_map *em = NULL;
-       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct extent_map *em;
        int ret = 1;
+       bool next_mergeable = true;
 
        /*
         * make sure that once we start defragging an extent, we keep on
@@ -827,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 
        *skip = 0;
 
-       /*
-        * hopefully we have this extent in the tree already, try without
-        * the full extent lock
-        */
-       read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree, start, len);
-       read_unlock(&em_tree->lock);
-
-       if (!em) {
-               /* get the big lock and read metadata off disk */
-               lock_extent(io_tree, start, start + len - 1);
-               em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-               unlock_extent(io_tree, start, start + len - 1);
-
-               if (IS_ERR(em))
-                       return 0;
-       }
+       em = defrag_lookup_extent(inode, start);
+       if (!em)
+               return 0;
 
        /* this will cover holes, and inline extents */
        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
@@ -851,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
                goto out;
        }
 
-       /* If we have nothing to merge with us, just skip. */
-       if (check_adjacent_extents(inode, em)) {
-               ret = 0;
-               goto out;
-       }
+       next_mergeable = defrag_check_next_extent(inode, em);
 
        /*
-        * we hit a real extent, if it is big don't bother defragging it again
+        * we hit a real extent, if it is big or the next extent is not a
+        * real extent, don't bother defragging it
         */
-       if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+       if ((*last_len == 0 || *last_len >= thresh) &&
+           (em->len >= thresh || !next_mergeable))
                ret = 0;
-
 out:
        /*
         * last_len ends up being a counter of how many bytes we've defragged.
@@ -1141,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                        break;
 
                if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-                                        PAGE_CACHE_SIZE, extent_thresh,
-                                        &last_len, &skip, &defrag_end)) {
+                                        extent_thresh, &last_len, &skip,
+                                        &defrag_end)) {
                        unsigned long next;
                        /*
                         * the should_defrag function tells us how much to skip
@@ -1303,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                ret = -EINVAL;
                goto out_free;
        }
+       if (device->fs_devices && device->fs_devices->seeding) {
+               printk(KERN_INFO "btrfs: resizer unable to apply on "
+                      "seeding device %llu\n",
+                      (unsigned long long)devid);
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        if (!strcmp(sizestr, "max"))
                new_size = device->bdev->bd_inode->i_size;
        else {
@@ -1344,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        do_div(new_size, root->sectorsize);
        new_size *= root->sectorsize;
 
-       printk(KERN_INFO "btrfs: new size for %s is %llu\n",
-               device->name, (unsigned long long)new_size);
+       printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
+                     rcu_str_deref(device->name),
+                     (unsigned long long)new_size);
 
        if (new_size > old_size) {
                trans = btrfs_start_transaction(root, 0);
@@ -2262,10 +2274,17 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
        di_args->bytes_used = dev->bytes_used;
        di_args->total_bytes = dev->total_bytes;
        memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
-       if (dev->name)
-               strncpy(di_args->path, dev->name, sizeof(di_args->path));
-       else
+       if (dev->name) {
+               struct rcu_string *name;
+
+               rcu_read_lock();
+               name = rcu_dereference(dev->name);
+               strncpy(di_args->path, name->str, sizeof(di_args->path));
+               rcu_read_unlock();
+               di_args->path[sizeof(di_args->path) - 1] = 0;
+       } else {
                di_args->path[0] = '\0';
+       }
 
 out:
        if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
@@ -2622,6 +2641,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
 
+                       inode_inc_iversion(inode);
                        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
                        /*
@@ -2914,7 +2934,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                up_read(&info->groups_sem);
        }
 
-       user_dest = (struct btrfs_ioctl_space_info *)
+       user_dest = (struct btrfs_ioctl_space_info __user *)
                (arg + sizeof(struct btrfs_ioctl_space_args));
 
        if (copy_to_user(user_dest, dest_orig, alloc_size))
@@ -3042,6 +3062,28 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
        return ret;
 }
 
+static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+                                     void __user *arg, int reset_after_read)
+{
+       struct btrfs_ioctl_get_dev_stats *sa;
+       int ret;
+
+       if (reset_after_read && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       sa = memdup_user(arg, sizeof(*sa));
+       if (IS_ERR(sa))
+               return PTR_ERR(sa);
+
+       ret = btrfs_get_dev_stats(root, sa, reset_after_read);
+
+       if (copy_to_user(arg, sa, sizeof(*sa)))
+               ret = -EFAULT;
+
+       kfree(sa);
+       return ret;
+}
+
 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 {
        int ret = 0;
@@ -3212,8 +3254,9 @@ void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
        }
 }
 
-static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 {
+       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ioctl_balance_args *bargs;
        struct btrfs_balance_control *bctl;
@@ -3225,6 +3268,10 @@ static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
        if (fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
+       ret = mnt_want_write(file->f_path.mnt);
+       if (ret)
+               return ret;
+
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
@@ -3291,6 +3338,7 @@ out_bargs:
 out:
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+       mnt_drop_write(file->f_path.mnt);
        return ret;
 }
 
@@ -3386,7 +3434,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
-               return btrfs_ioctl_balance(root, NULL);
+               return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_CLONE:
                return btrfs_ioctl_clone(file, arg, 0, 0, 0);
        case BTRFS_IOC_CLONE_RANGE:
@@ -3419,11 +3467,15 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SCRUB_PROGRESS:
                return btrfs_ioctl_scrub_progress(root, argp);
        case BTRFS_IOC_BALANCE_V2:
-               return btrfs_ioctl_balance(root, argp);
+               return btrfs_ioctl_balance(file, argp);
        case BTRFS_IOC_BALANCE_CTL:
                return btrfs_ioctl_balance_ctl(root, arg);
        case BTRFS_IOC_BALANCE_PROGRESS:
                return btrfs_ioctl_balance_progress(root, argp);
+       case BTRFS_IOC_GET_DEV_STATS:
+               return btrfs_ioctl_get_dev_stats(root, argp, 0);
+       case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
+               return btrfs_ioctl_get_dev_stats(root, argp, 1);
        }
 
        return -ENOTTY;
index 086e6bdae1c4482b93b6dda4d16b1c5af288f2eb..e440aa653c30d6f6c8ad1e7437bfa6e2b4d50799 100644 (file)
@@ -266,6 +266,35 @@ struct btrfs_ioctl_logical_ino_args {
        __u64                           inodes;
 };
 
+enum btrfs_dev_stat_values {
+       /* disk I/O failure stats */
+       BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
+
+       /* stats for indirect indications for I/O failures */
+       BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
+                                        * contents is illegal: this is an
+                                        * indication that the block was damaged
+                                        * during read or write, or written to
+                                        * wrong location or read from wrong
+                                        * location */
+       BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
+                                        * been written */
+
+       BTRFS_DEV_STAT_VALUES_MAX
+};
+
+struct btrfs_ioctl_get_dev_stats {
+       __u64 devid;                            /* in */
+       __u64 nr_items;                         /* in/out */
+
+       /* out values: */
+       __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
+
+       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
                                   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -310,7 +339,7 @@ struct btrfs_ioctl_logical_ino_args {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
                                   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
                              struct btrfs_ioctl_scrub_args)
@@ -330,5 +359,9 @@ struct btrfs_ioctl_logical_ino_args {
                                        struct btrfs_ioctl_ino_path_args)
 #define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
                                        struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
+                                     struct btrfs_ioctl_get_dev_stats)
+#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
+                                       struct btrfs_ioctl_get_dev_stats)
 
 #endif
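The new BTRFS_IOC_GET_DEV_STATS ioctl declared above can be driven from userspace with a plain ioctl() on any file descriptor belonging to the filesystem. A rough usage sketch, assuming BTRFS_IOCTL_MAGIC is 0x94 as defined earlier in this header; the structure is re-declared locally since the header is not installed for userspace, and devid 1 plus the minimal error handling are only illustrative.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC 0x94          /* assumed, from earlier in ioctl.h */
#define BTRFS_DEV_STAT_VALUES_MAX 5

struct btrfs_ioctl_get_dev_stats {
        __u64 devid;                            /* in */
        __u64 nr_items;                         /* in/out */
        __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
        __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX];
};

#define BTRFS_IOC_GET_DEV_STATS \
        _IOWR(BTRFS_IOCTL_MAGIC, 52, struct btrfs_ioctl_get_dev_stats)

int main(int argc, char **argv)
{
        struct btrfs_ioctl_get_dev_stats args;
        unsigned int i;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);   /* any path on the btrfs mount */
        if (fd < 0)
                return 1;

        memset(&args, 0, sizeof(args));
        args.devid = 1;                         /* first device, illustrative */
        args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
        if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
                for (i = 0; i < args.nr_items && i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                        printf("dev_stat[%u] = %llu\n", i,
                               (unsigned long long)args.values[i]);
        close(fd);
        return 0;
}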
index bbf6d0d9aebe9b68f0ea8e5c121783d81733f7d7..643335a4fe3c6699a894c19d01e79bed9ef631c7 100644 (file)
@@ -196,7 +196,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
-       entry->inode = inode;
+       entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);
@@ -212,12 +212,12 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 
        trace_btrfs_ordered_extent_add(inode, entry);
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
 
        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
@@ -264,9 +264,9 @@ void btrfs_add_ordered_sum(struct inode *inode,
        struct btrfs_ordered_inode_tree *tree;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
 }
 
 /*
@@ -283,18 +283,19 @@ void btrfs_add_ordered_sum(struct inode *inode,
  */
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 *file_offset, u64 io_size)
+                                  u64 *file_offset, u64 io_size, int uptodate)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
+       unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
@@ -323,6 +324,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                       (unsigned long long)to_dec);
        }
        entry->bytes_left -= to_dec;
+       if (!uptodate)
+               set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
@@ -332,7 +336,7 @@ out:
                *cached = entry;
                atomic_inc(&entry->refs);
        }
-       spin_unlock(&tree->lock);
+       spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
 }
 
@@ -347,15 +351,21 @@ out:
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 file_offset, u64 io_size)
+                                  u64 file_offset, u64 io_size, int uptodate)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
+       unsigned long flags;
        int ret;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irqsave(&tree->lock, flags);
+       if (cached && *cached) {
+               entry = *cached;
+               goto have_entry;
+       }
+
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
@@ -363,6 +373,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
        }
 
        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
@@ -374,6 +385,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
+       if (!uptodate)
+               set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
@@ -383,7 +397,7 @@ out:
                *cached = entry;
                atomic_inc(&entry->refs);
        }
-       spin_unlock(&tree->lock);
+       spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
 }
 
@@ -399,6 +413,8 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
        trace_btrfs_ordered_extent_put(entry->inode, entry);
 
        if (atomic_dec_and_test(&entry->refs)) {
+               if (entry->inode)
+                       btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -411,21 +427,22 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree lock
- * while you call this function.
+ * and waiters are woken up.
  */
-static void __btrfs_remove_ordered_extent(struct inode *inode,
-                                         struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+                                struct btrfs_ordered_extent *entry)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;
 
        tree = &BTRFS_I(inode)->ordered_tree;
+       spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+       spin_unlock_irq(&tree->lock);
 
        spin_lock(&root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
@@ -442,21 +459,6 @@ static void __btrfs_remove_ordered_extent(struct inode *inode,
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
-}
-
-/*
- * remove an ordered extent from the tree.  No references are dropped
- * but any waiters are woken.
- */
-void btrfs_remove_ordered_extent(struct inode *inode,
-                                struct btrfs_ordered_extent *entry)
-{
-       struct btrfs_ordered_inode_tree *tree;
-
-       tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
-       __btrfs_remove_ordered_extent(inode, entry);
-       spin_unlock(&tree->lock);
        wake_up(&entry->wait);
 }
 
@@ -621,17 +623,29 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }
-again:
+
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-       /* The compression code will leave pages locked but return from
-        * writepage without setting the page writeback.  Starting again
-        * with WB_SYNC_ALL will end up waiting for the IO to actually start.
+       /*
+        * So with compression we will find and lock a dirty page and clear the
+        * first one as dirty, set up an async extent, and immediately return
+        * with the entire range locked but with nobody actually marked with
+        * writeback.  So we can't just filemap_write_and_wait_range() and
+        * expect it to work since it will just kick off a thread to do the
+        * actual work.  So we need to call filemap_fdatawrite_range _again_
+        * since it will wait on the page lock, which won't be unlocked until
+        * after the pages have been marked as writeback and so we're good to go
+        * from there.  We have to do this otherwise we'll miss the ordered
+        * extents and that results in badness.  Please Josef, do not think you
+        * know better and pull this out at some point in the future, it is
+        * right and you are wrong.
         */
-       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+       if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                    &BTRFS_I(inode)->runtime_flags))
+               filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
        filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
@@ -657,11 +671,6 @@ again:
                        break;
                end--;
        }
-       if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-                          EXTENT_DELALLOC, 0, NULL)) {
-               schedule_timeout(1);
-               goto again;
-       }
 }
 
 /*
@@ -676,7 +685,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;
@@ -687,7 +696,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
        if (entry)
                atomic_inc(&entry->refs);
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -703,7 +712,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
@@ -728,7 +737,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 out:
        if (entry)
                atomic_inc(&entry->refs);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -744,7 +753,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;
@@ -752,7 +761,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -764,7 +773,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
 {
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
@@ -779,7 +787,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        disk_i_size = BTRFS_I(inode)->disk_i_size;
 
        /* truncate file */
@@ -797,14 +805,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                goto out;
        }
 
-       /*
-        * we can't update the disk_isize if there are delalloc bytes
-        * between disk_i_size and  this ordered extent
-        */
-       if (test_range_bit(io_tree, disk_i_size, offset - 1,
-                          EXTENT_DELALLOC, 0, NULL)) {
-               goto out;
-       }
        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
@@ -825,15 +825,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                }
                node = prev;
        }
-       while (node) {
+       for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+
+               /* We treat this entry as if it doesn't exist */
+               if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+                       continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
-               node = rb_prev(node);
        }
        new_i_size = min_t(u64, offset, i_size);
 
@@ -851,43 +854,49 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                else
                        node = rb_first(&tree->tree);
        }
-       i_size_test = 0;
-       if (node) {
-               /*
-                * do we have an area where IO might have finished
-                * between our ordered extent and the next one.
-                */
+
+       /*
+        * We are looking for an area between our current extent and the next
+        * ordered extent to update the i_size to.  There are 3 cases here
+        *
+        * 1) We don't actually have anything and we can update to i_size.
+        * 2) We have stuff but they already did their i_size update so again we
+        * can just update to i_size.
+        * 3) We have an outstanding ordered extent so the most we can update
+        * our disk_i_size to is the start of the next offset.
+        */
+       i_size_test = i_size;
+       for (; node; node = rb_next(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-               if (test->file_offset > offset)
+
+               if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+                       continue;
+               if (test->file_offset > offset) {
                        i_size_test = test->file_offset;
-       } else {
-               i_size_test = i_size;
+                       break;
+               }
        }
 
        /*
         * i_size_test is the end of a region after this ordered
-        * extent where there are no ordered extents.  As long as there
-        * are no delalloc bytes in this area, it is safe to update
-        * disk_i_size to the end of the region.
+        * extent where there are no ordered extents; we can safely set
+        * disk_i_size to this.
         */
-       if (i_size_test > offset &&
-           !test_range_bit(io_tree, offset, i_size_test - 1,
-                           EXTENT_DELALLOC, 0, NULL)) {
+       if (i_size_test > offset)
                new_i_size = min_t(u64, i_size_test, i_size);
-       }
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
 out:
        /*
-        * we need to remove the ordered extent with the tree lock held
-        * so that other people calling this function don't find our fully
-        * processed ordered entry and skip updating the i_size
+        * We need to do this because we can't remove ordered extents until
+        * after the disk_i_size has been updated and then the inode has been
+        * updated to reflect the change, so we need to tell anybody who finds
+        * this ordered extent that we've already done all the real work, we
+        * just haven't completed all the other work.
         */
        if (ordered)
-               __btrfs_remove_ordered_extent(inode, ordered);
-       spin_unlock(&tree->lock);
-       if (ordered)
-               wake_up(&ordered->wait);
+               set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
+       spin_unlock_irq(&tree->lock);
        return ret;
 }
 
@@ -912,7 +921,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
        if (!ordered)
                return 1;
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
@@ -927,7 +936,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                }
        }
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return ret;
 }
index c355ad4dc1a66962d30557e9bbdc08ca9fc25da8..e03c560d299732cfe2114fe41d049b691a949e61 100644 (file)
@@ -74,6 +74,12 @@ struct btrfs_ordered_sum {
 
 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
 
+#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
+
+#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
+                                      * has done its due diligence in updating
+                                      * the isize. */
+
 struct btrfs_ordered_extent {
        /* logical offset in the file */
        u64 file_offset;
@@ -113,6 +119,8 @@ struct btrfs_ordered_extent {
 
        /* a per root list of all the pending ordered extents */
        struct list_head root_extent_list;
+
+       struct btrfs_work work;
 };
 
 
@@ -143,10 +151,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 file_offset, u64 io_size);
+                                  u64 file_offset, u64 io_size, int uptodate);
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 *file_offset, u64 io_size);
+                                  u64 *file_offset, u64 io_size,
+                                  int uptodate);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
index f38e452486b8d12ba36589248579dc158981c3be..5e23684887eb8eb401594af69b1be7372f7188aa 100644 (file)
@@ -294,6 +294,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                               btrfs_dev_extent_chunk_offset(l, dev_extent),
                               (unsigned long long)
                               btrfs_dev_extent_length(l, dev_extent));
+               case BTRFS_DEV_STATS_KEY:
+                       printk(KERN_INFO "\t\tdevice stats\n");
+                       break;
                };
        }
 }
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
new file mode 100644 (file)
index 0000000..9e111e4
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+struct rcu_string {
+       struct rcu_head rcu;
+       char str[0];
+};
+
+static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
+{
+       size_t len = strlen(src) + 1;
+       struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
+                                        (len * sizeof(char)), mask);
+       if (!ret)
+               return ret;
+       strncpy(ret->str, src, len);
+       return ret;
+}
+
+static inline void rcu_string_free(struct rcu_string *str)
+{
+       if (str)
+               kfree_rcu(str, rcu);
+}
+
+#define printk_in_rcu(fmt, ...) do {   \
+       rcu_read_lock();                \
+       printk(fmt, __VA_ARGS__);       \
+       rcu_read_unlock();              \
+} while (0)
+
+#define printk_ratelimited_in_rcu(fmt, ...) do {       \
+       rcu_read_lock();                                \
+       printk_ratelimited(fmt, __VA_ARGS__);           \
+       rcu_read_unlock();                              \
+} while (0)
+
+#define rcu_str_deref(rcu_str) ({                              \
+       struct rcu_string *__str = rcu_dereference(rcu_str);    \
+       __str->str;                                             \
+})
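
A brief sketch of the usage pattern these helpers are meant for, mirroring the device->name conversion in the later hunks of this commit; the surrounding error handling is illustrative only.

	/* publish a (re)named device under RCU */
	struct rcu_string *name;

	name = rcu_string_strdup(path, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	rcu_assign_pointer(device->name, name);

	/* readers only need rcu_read_lock() around the dereference */
	printk_in_rcu(KERN_INFO "btrfs: device %s scanned\n",
		      rcu_str_deref(device->name));

	/* freeing defers the kfree() past any in-flight readers */
	rcu_string_free(device->name);
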
index ac5d010858848d007e380d529476ad9eb4f6fb31..48a4882d8ad5955eaa0be2b940e35f0b3b2a7f6f 100644 (file)
@@ -718,13 +718,18 @@ static void reada_start_machine_worker(struct btrfs_work *work)
 {
        struct reada_machine_work *rmw;
        struct btrfs_fs_info *fs_info;
+       int old_ioprio;
 
        rmw = container_of(work, struct reada_machine_work, work);
        fs_info = rmw->fs_info;
 
        kfree(rmw);
 
+       old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
+                                      task_nice_ioprio(current));
+       set_task_ioprio(current, BTRFS_IOPRIO_READA);
        __reada_start_machine(fs_info);
+       set_task_ioprio(current, old_ioprio);
 }
 
 static void __reada_start_machine(struct btrfs_fs_info *fs_info)
index 2f3d6f917fb3373c02335b6912fcba1006f5fabe..b223620cd5a6d59aa4b707e2539e59a8244d2dd1 100644 (file)
@@ -26,6 +26,7 @@
 #include "backref.h"
 #include "extent_io.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -50,7 +51,7 @@ struct scrub_dev;
 struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
-       struct block_device     *bdev;
+       struct btrfs_device     *dev;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
@@ -86,6 +87,7 @@ struct scrub_block {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
+               unsigned int    generation_error:1; /* also sets header_error */
        };
 };
 
@@ -319,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
         * hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-               printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+               printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
-                       swarn->logical, swarn->dev->name,
+                       swarn->logical, rcu_str_deref(swarn->dev->name),
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);
@@ -331,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
        return 0;
 
 err:
-       printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+       printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
-               swarn->logical, swarn->dev->name,
+               swarn->logical, rcu_str_deref(swarn->dev->name),
                (unsigned long long)swarn->sector, root, inum, offset, ret);
 
        free_ipath(ipath);
@@ -389,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
                do {
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                        &ref_root, &ref_level);
-                       printk(KERN_WARNING
+                       printk_in_rcu(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
-                               "%llu\n", errstr, swarn.logical, dev->name,
+                               "%llu\n", errstr, swarn.logical,
+                               rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
@@ -579,9 +582,11 @@ out:
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.uncorrectable_errors;
                spin_unlock(&sdev->stat_lock);
-               printk_ratelimited(KERN_ERR
+
+               printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
-                       (unsigned long long)fixup->logical, sdev->dev->name);
+                       (unsigned long long)fixup->logical,
+                       rcu_str_deref(sdev->dev->name));
        }
 
        btrfs_free_path(path);
@@ -675,6 +680,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
 
@@ -686,6 +693,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +708,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
 
@@ -725,12 +736,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.csum_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.verify_errors++;
@@ -738,6 +753,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
+               if (sblock_bad->generation_error)
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_GENERATION_ERRS);
+               else
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }
 
        if (sdev->readonly)
@@ -919,18 +940,20 @@ corrected_error:
                        spin_lock(&sdev->stat_lock);
                        sdev->stat.corrected_errors++;
                        spin_unlock(&sdev->stat_lock);
-                       printk_ratelimited(KERN_ERR
+                       printk_ratelimited_in_rcu(KERN_ERR
                                "btrfs: fixed up error at logical %llu on dev %s\n",
-                               (unsigned long long)logical, sdev->dev->name);
+                               (unsigned long long)logical,
+                               rcu_str_deref(sdev->dev->name));
                }
        } else {
 did_not_correct_error:
                spin_lock(&sdev->stat_lock);
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
-               printk_ratelimited(KERN_ERR
+               printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
-                       (unsigned long long)logical, sdev->dev->name);
+                       (unsigned long long)logical,
+                       rcu_str_deref(sdev->dev->name));
        }
 
 out:
@@ -998,8 +1021,8 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
-                       /* for missing devices, bdev is NULL */
-                       page->bdev = bbio->stripes[mirror_index].dev->bdev;
+                       /* for missing devices, dev->bdev is NULL */
+                       page->dev = bbio->stripes[mirror_index].dev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
                        if (!page->page) {
@@ -1043,7 +1066,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);
 
-               if (page->bdev == NULL) {
+               if (page->dev->bdev == NULL) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
                        continue;
@@ -1053,7 +1076,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
-               bio->bi_bdev = page->bdev;
+               bio->bi_bdev = page->dev->bdev;
                bio->bi_sector = page->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;
@@ -1102,11 +1125,14 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                h = (struct btrfs_header *)mapped_buffer;
 
                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
-                   generation != le64_to_cpu(h->generation) ||
                    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
                    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
-                          BTRFS_UUID_SIZE))
+                          BTRFS_UUID_SIZE)) {
+                       sblock->header_error = 1;
+               } else if (generation != le64_to_cpu(h->generation)) {
                        sblock->header_error = 1;
+                       sblock->generation_error = 1;
+               }
                csum = h->csum;
        } else {
                if (!have_csum)
@@ -1182,7 +1208,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
-               bio->bi_bdev = page_bad->bdev;
+               bio->bi_bdev = page_bad->dev->bdev;
                bio->bi_sector = page_bad->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;
@@ -1196,6 +1222,12 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 
                /* this will also unplug the queue */
                wait_for_completion(&complete);
+               if (!bio_flagged(bio, BIO_UPTODATE)) {
+                       btrfs_dev_stat_inc_and_print(page_bad->dev,
+                               BTRFS_DEV_STAT_WRITE_ERRS);
+                       bio_put(bio);
+                       return -EIO;
+               }
                bio_put(bio);
        }
 
@@ -1352,7 +1384,8 @@ static int scrub_checksum_super(struct scrub_block *sblock)
        u64 mapped_size;
        void *p;
        u32 crc = ~(u32)0;
-       int fail = 0;
+       int fail_gen = 0;
+       int fail_cor = 0;
        u64 len;
        int index;
 
@@ -1363,13 +1396,13 @@ static int scrub_checksum_super(struct scrub_block *sblock)
        memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
        if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
-               ++fail;
+               ++fail_cor;
 
        if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
-               ++fail;
+               ++fail_gen;
 
        if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
-               ++fail;
+               ++fail_cor;
 
        len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1394,9 +1427,9 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
-               ++fail;
+               ++fail_cor;
 
-       if (fail) {
+       if (fail_cor + fail_gen) {
                /*
                 * if we find an error in a super block, we just report it.
                 * They will get written with the next transaction commit
@@ -1405,9 +1438,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.super_errors;
                spin_unlock(&sdev->stat_lock);
+               if (fail_cor)
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
+               else
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_GENERATION_ERRS);
        }
 
-       return fail;
+       return fail_cor + fail_gen;
 }
 
 static void scrub_block_get(struct scrub_block *sblock)
@@ -1551,7 +1590,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                        return -ENOMEM;
                }
                spage->sblock = sblock;
-               spage->bdev = sdev->dev->bdev;
+               spage->dev = sdev->dev;
                spage->flags = flags;
                spage->generation = gen;
                spage->logical = logical;
index c5f8fca4195fca9eb3806ebfbccf52d03049691e..e23991574fdf309e6ed95e23985fc5dd0af28e0f 100644 (file)
@@ -54,6 +54,7 @@
 #include "version.h"
 #include "export.h"
 #include "compression.h"
+#include "rcu-string.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/btrfs.h>
@@ -188,7 +189,8 @@ void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
        va_start(args, fmt);
 
        if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
-               strncpy(lvl, fmt, 3);
+               memcpy(lvl, fmt, 3);
+               lvl[3] = '\0';
                fmt += 3;
                type = logtypes[fmt[1] - '0'];
        } else
@@ -435,11 +437,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_thread_pool:
                        intarg = 0;
                        match_int(&args[0], &intarg);
-                       if (intarg) {
+                       if (intarg)
                                info->thread_pool_size = intarg;
-                               printk(KERN_INFO "btrfs: thread pool %d\n",
-                                      info->thread_pool_size);
-                       }
                        break;
                case Opt_max_inline:
                        num = match_strdup(&args[0]);
@@ -769,7 +768,7 @@ static int btrfs_fill_super(struct super_block *sb,
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
-
+       sb->s_flags |= MS_I_VERSION;
        err = open_ctree(sb, fs_devices, (char *)data);
        if (err) {
                printk("btrfs: open_ctree failed\n");
@@ -925,63 +924,48 @@ static inline int is_subvolume_inode(struct inode *inode)
  */
 static char *setup_root_args(char *args)
 {
-       unsigned copied = 0;
-       unsigned len = strlen(args) + 2;
-       char *pos;
-       char *ret;
+       unsigned len = strlen(args) + 2 + 1;
+       char *src, *dst, *buf;
 
        /*
-        * We need the same args as before, but minus
-        *
-        * subvol=a
-        *
-        * and add
+        * We need the same args as before, but with this substitution:
+        * s!subvol=[^,]+!subvolid=0!
         *
-        * subvolid=0
-        *
-        * which is a difference of 2 characters, so we allocate strlen(args) +
-        * 2 characters.
+        * Since the replacement string is up to 2 bytes longer than the
+        * original, allocate strlen(args) + 2 + 1 bytes.
         */
-       ret = kzalloc(len * sizeof(char), GFP_NOFS);
-       if (!ret)
-               return NULL;
-       pos = strstr(args, "subvol=");
 
+       src = strstr(args, "subvol=");
        /* This shouldn't happen, but just in case.. */
-       if (!pos) {
-               kfree(ret);
+       if (!src)
+               return NULL;
+
+       buf = dst = kmalloc(len, GFP_NOFS);
+       if (!buf)
                return NULL;
-       }
 
        /*
-        * The subvol=<> arg is not at the front of the string, copy everybody
-        * up to that into ret.
+        * If the subvol= arg is not at the start of the string,
+        * copy whatever precedes it into buf.
         */
-       if (pos != args) {
-               *pos = '\0';
-               strcpy(ret, args);
-               copied += strlen(args);
-               pos++;
+       if (src != args) {
+               *src++ = '\0';
+               strcpy(buf, args);
+               dst += strlen(args);
        }
 
-       strncpy(ret + copied, "subvolid=0", len - copied);
-
-       /* Length of subvolid=0 */
-       copied += 10;
+       strcpy(dst, "subvolid=0");
+       dst += strlen("subvolid=0");
 
        /*
-        * If there is no , after the subvol= option then we know there's no
-        * other options and we can just return.
+        * If there is a "," after the original subvol=... string,
+        * copy that suffix into our buffer.  Otherwise, we're done.
         */
-       pos = strchr(pos, ',');
-       if (!pos)
-               return ret;
+       src = strchr(src, ',');
+       if (src)
+               strcpy(dst, src);
 
-       /* Copy the rest of the arguments into our buffer */
-       strncpy(ret + copied, pos, len - copied);
-       copied += strlen(pos);
-
-       return ret;
+       return buf;
 }
 
 static struct dentry *mount_subvol(const char *subvol_name, int flags,
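
As a worked illustration of the rewritten helper (the option string is hypothetical, not taken from the patch): for args = "device=/dev/sdb,subvol=snap1,compress=lzo", setup_root_args() now returns "device=/dev/sdb,subvolid=0,compress=lzo". The buffer is sized strlen(args) + 2 + 1 because "subvolid=0" is at most two characters longer than the shortest possible "subvol=X" it replaces, plus the terminating NUL.
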
@@ -1118,6 +1102,40 @@ error_fs_info:
        return ERR_PTR(error);
 }
 
+static void btrfs_set_max_workers(struct btrfs_workers *workers, int new_limit)
+{
+       spin_lock_irq(&workers->lock);
+       workers->max_workers = new_limit;
+       spin_unlock_irq(&workers->lock);
+}
+
+static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
+                                    int new_pool_size, int old_pool_size)
+{
+       if (new_pool_size == old_pool_size)
+               return;
+
+       fs_info->thread_pool_size = new_pool_size;
+
+       printk(KERN_INFO "btrfs: resize thread pool %d -> %d\n",
+              old_pool_size, new_pool_size);
+
+       btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
+       btrfs_set_max_workers(&fs_info->workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_meta_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_meta_write_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_write_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_freespace_worker, new_pool_size);
+       btrfs_set_max_workers(&fs_info->delayed_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->readahead_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->scrub_workers, new_pool_size);
+}
+
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1137,6 +1155,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                goto restore;
        }
 
+       btrfs_resize_thread_pool(fs_info,
+               fs_info->thread_pool_size, old_thread_pool_size);
+
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
 
@@ -1166,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
+               ret = btrfs_resume_balance_async(fs_info);
+               if (ret)
+                       goto restore;
+
                sb->s_flags &= ~MS_RDONLY;
        }
 
@@ -1180,7 +1205,8 @@ restore:
        fs_info->compress_type = old_compress_type;
        fs_info->max_inline = old_max_inline;
        fs_info->alloc_start = old_alloc_start;
-       fs_info->thread_pool_size = old_thread_pool_size;
+       btrfs_resize_thread_pool(fs_info,
+               old_thread_pool_size, fs_info->thread_pool_size);
        fs_info->metadata_ratio = old_metadata_ratio;
        return ret;
 }
@@ -1461,12 +1487,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
                                   "error %d\n", btrfs_ino(inode), ret);
 }
 
+static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
+       struct btrfs_fs_devices *cur_devices;
+       struct btrfs_device *dev, *first_dev = NULL;
+       struct list_head *head;
+       struct rcu_string *name;
+
+       mutex_lock(&fs_info->fs_devices->device_list_mutex);
+       cur_devices = fs_info->fs_devices;
+       while (cur_devices) {
+               head = &cur_devices->devices;
+               list_for_each_entry(dev, head, dev_list) {
+                       if (!first_dev || dev->devid < first_dev->devid)
+                               first_dev = dev;
+               }
+               cur_devices = cur_devices->seed;
+       }
+
+       if (first_dev) {
+               rcu_read_lock();
+               name = rcu_dereference(first_dev->name);
+               seq_escape(m, name->str, " \t\n\\");
+               rcu_read_unlock();
+       } else {
+               WARN_ON(1);
+       }
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+       return 0;
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
        .put_super      = btrfs_put_super,
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
+       .show_devname   = btrfs_show_devname,
        .write_inode    = btrfs_write_inode,
        .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
index 36422254ef6765c14290a2373fa6d83cf2d364d5..b72b068183ec6bb334a1cb9088ffa02d7eb13029 100644 (file)
@@ -28,6 +28,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "inode-map.h"
+#include "volumes.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -55,49 +56,54 @@ static noinline void switch_commit_root(struct btrfs_root *root)
 static noinline int join_transaction(struct btrfs_root *root, int nofail)
 {
        struct btrfs_transaction *cur_trans;
+       struct btrfs_fs_info *fs_info = root->fs_info;
 
-       spin_lock(&root->fs_info->trans_lock);
+       spin_lock(&fs_info->trans_lock);
 loop:
        /* The file system has been taken offline. No new transactions. */
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-               spin_unlock(&root->fs_info->trans_lock);
+       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }
 
-       if (root->fs_info->trans_no_join) {
+       if (fs_info->trans_no_join) {
                if (!nofail) {
-                       spin_unlock(&root->fs_info->trans_lock);
+                       spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }
 
-       cur_trans = root->fs_info->running_transaction;
+       cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
-                       spin_unlock(&root->fs_info->trans_lock);
+                       spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
-               spin_unlock(&root->fs_info->trans_lock);
+               spin_unlock(&fs_info->trans_lock);
                return 0;
        }
-       spin_unlock(&root->fs_info->trans_lock);
+       spin_unlock(&fs_info->trans_lock);
 
        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;
 
-       spin_lock(&root->fs_info->trans_lock);
-       if (root->fs_info->running_transaction) {
+       spin_lock(&fs_info->trans_lock);
+       if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
-               cur_trans = root->fs_info->running_transaction;
+               cur_trans = fs_info->running_transaction;
                goto loop;
+       } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               spin_unlock(&root->fs_info->trans_lock);
+               kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+               return -EROFS;
        }
 
        atomic_set(&cur_trans->num_writers, 1);
@@ -121,20 +127,38 @@ loop:
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
        cur_trans->delayed_refs.seq = 1;
+
+       /*
+        * although the tree mod log is per file system and not per transaction,
+        * the log must never go across transaction boundaries.
+        */
+       smp_mb();
+       if (!list_empty(&fs_info->tree_mod_seq_list)) {
+               printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+                       "creating a fresh transaction\n");
+               WARN_ON(1);
+       }
+       if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
+               printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+                       "creating a fresh transaction\n");
+               WARN_ON(1);
+       }
+       atomic_set(&fs_info->tree_mod_seq, 0);
+
        init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
        INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
-       list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+       list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
-                            root->fs_info->btree_inode->i_mapping);
-       root->fs_info->generation++;
-       cur_trans->transid = root->fs_info->generation;
-       root->fs_info->running_transaction = cur_trans;
+                            fs_info->btree_inode->i_mapping);
+       fs_info->generation++;
+       cur_trans->transid = fs_info->generation;
+       fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
-       spin_unlock(&root->fs_info->trans_lock);
+       spin_unlock(&fs_info->trans_lock);
 
        return 0;
 }
@@ -758,6 +782,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
+       ret = btrfs_run_dev_stats(trans, root->fs_info);
+       BUG_ON(ret);
+
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
@@ -1190,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 
 
 static void cleanup_transaction(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root)
+                               struct btrfs_root *root, int err)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
 
        WARN_ON(trans->use_count > 1);
 
+       btrfs_abort_transaction(trans, root, err);
+
        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
+       if (cur_trans == root->fs_info->running_transaction) {
+               root->fs_info->running_transaction = NULL;
+               root->fs_info->trans_no_join = 0;
+       }
        spin_unlock(&root->fs_info->trans_lock);
 
        btrfs_cleanup_one_transaction(trans->transaction, root);
@@ -1503,7 +1536,7 @@ cleanup_transaction:
 //     WARN_ON(1);
        if (current->journal_info == trans)
                current->journal_info = NULL;
-       cleanup_transaction(trans, root);
+       cleanup_transaction(trans, root, ret);
 
        return ret;
 }
index eb1ae908582cc51162a61798c80f3ed38e7ab6e8..8abeae4224f92dbd870877b76224515c304e979d 100644 (file)
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -895,6 +897,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
@@ -1628,7 +1634,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
        int i;
        int ret;
 
-       btrfs_read_buffer(eb, gen);
+       ret = btrfs_read_buffer(eb, gen);
+       if (ret)
+               return ret;
 
        level = btrfs_header_level(eb);
 
@@ -1749,7 +1757,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
                        path->slots[*level]++;
                        if (wc->free) {
-                               btrfs_read_buffer(next, ptr_gen);
+                               ret = btrfs_read_buffer(next, ptr_gen);
+                               if (ret) {
+                                       free_extent_buffer(next);
+                                       return ret;
+                               }
 
                                btrfs_tree_lock(next);
                                btrfs_set_lock_blocking(next);
@@ -1766,7 +1778,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                        free_extent_buffer(next);
                        continue;
                }
-               btrfs_read_buffer(next, ptr_gen);
+               ret = btrfs_read_buffer(next, ptr_gen);
+               if (ret) {
+                       free_extent_buffer(next);
+                       return ret;
+               }
 
                WARN_ON(*level <= 0);
                if (path->nodes[*level-1])
@@ -2657,6 +2673,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
                btrfs_release_path(path);
        }
        btrfs_release_path(path);
+       if (ret > 0)
+               ret = 0;
        return ret;
 }
 
@@ -3028,21 +3046,6 @@ out:
        return ret;
 }
 
-static int inode_in_log(struct btrfs_trans_handle *trans,
-                struct inode *inode)
-{
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       int ret = 0;
-
-       mutex_lock(&root->log_mutex);
-       if (BTRFS_I(inode)->logged_trans == trans->transid &&
-           BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
-               ret = 1;
-       mutex_unlock(&root->log_mutex);
-       return ret;
-}
-
-
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -3083,7 +3086,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        if (ret)
                goto end_no_trans;
 
-       if (inode_in_log(trans, inode)) {
+       if (btrfs_inode_in_log(inode, trans->transid)) {
                ret = BTRFS_NO_LOG_SYNC;
                goto end_no_trans;
        }
index 12f5147bd2b1ae2a6016e7283c72ceccb44283b7..ab942f46b3dd81e06348c4950901f3e4eef87016 100644 (file)
@@ -23,9 +23,9 @@
  *
  * ulist = ulist_alloc();
  * ulist_add(ulist, root);
- * elem = NULL;
+ * ULIST_ITER_INIT(&uiter);
  *
- * while ((elem = ulist_next(ulist, elem)) {
+ * while ((elem = ulist_next(ulist, &uiter)) {
  *     for (all child nodes n in elem)
  *             ulist_add(ulist, n);
  *     do something useful with the node;
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(ulist_reinit);
  *
  * The allocated ulist will be returned in an initialized state.
  */
-struct ulist *ulist_alloc(unsigned long gfp_mask)
+struct ulist *ulist_alloc(gfp_t gfp_mask)
 {
        struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
 
@@ -144,13 +144,22 @@ EXPORT_SYMBOL(ulist_free);
  * unaltered.
  */
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-             unsigned long gfp_mask)
+             gfp_t gfp_mask)
+{
+       return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
+}
+
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+                   unsigned long *old_aux, gfp_t gfp_mask)
 {
        int i;
 
        for (i = 0; i < ulist->nnodes; ++i) {
-               if (ulist->nodes[i].val == val)
+               if (ulist->nodes[i].val == val) {
+                       if (old_aux)
+                               *old_aux = ulist->nodes[i].aux;
                        return 0;
+               }
        }
 
        if (ulist->nnodes >= ulist->nodes_alloced) {
@@ -188,33 +197,26 @@ EXPORT_SYMBOL(ulist_add);
 /**
  * ulist_next - iterate ulist
  * @ulist:     ulist to iterate
- * @prev:      previously returned element or %NULL to start iteration
+ * @uiter:     iterator variable, initialized with ULIST_ITER_INIT(&iterator)
  *
  * Note: locking must be provided by the caller. In case of rwlocks only read
  *       locking is needed
  *
- * This function is used to iterate an ulist. The iteration is started with
- * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * This function is used to iterate an ulist.
+ * It returns the next element from the ulist or %NULL when the
  * end is reached. No guarantee is made with respect to the order in which
  * the elements are returned. They might neither be returned in order of
  * addition nor in ascending order.
  * It is allowed to call ulist_add during an enumeration. Newly added items
  * are guaranteed to show up in the running enumeration.
  */
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-       int next;
-
        if (ulist->nnodes == 0)
                return NULL;
-
-       if (!prev)
-               return &ulist->nodes[0];
-
-       next = (prev - ulist->nodes) + 1;
-       if (next < 0 || next >= ulist->nnodes)
+       if (uiter->i < 0 || uiter->i >= ulist->nnodes)
                return NULL;
 
-       return &ulist->nodes[next];
+       return &ulist->nodes[uiter->i++];
 }
 EXPORT_SYMBOL(ulist_next);
index 2e25dec58ec0e56251fbca880d27cc927aac95dc..21bdc8ec813046ac56e3c7db0739bcdba7ac188a 100644 (file)
  */
 #define ULIST_SIZE 16
 
+struct ulist_iterator {
+       int i;
+};
+
 /*
  * element of the list
  */
@@ -59,10 +63,15 @@ struct ulist {
 void ulist_init(struct ulist *ulist);
 void ulist_fini(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
-struct ulist *ulist_alloc(unsigned long gfp_mask);
+struct ulist *ulist_alloc(gfp_t gfp_mask);
 void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-             unsigned long gfp_mask);
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+             gfp_t gfp_mask);
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+                   unsigned long *old_aux, gfp_t gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist,
+                             struct ulist_iterator *uiter);
+
+#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
 
 #endif
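
The iterator rework above replaces the old element-pointer cursor with an explicit
struct ulist_iterator, and ulist_add_merge() additionally reports the aux value of an
entry that is already present. A minimal sketch of the new calling convention under the
API declared in this header; walk_example() and its values are made up for illustration
and error handling is kept short:

	#include "ulist.h"

	static int walk_example(u64 start)
	{
		struct ulist *ulist;
		struct ulist_iterator uiter;
		struct ulist_node *node;
		unsigned long old_aux = 0;
		int ret;

		ulist = ulist_alloc(GFP_NOFS);
		if (!ulist)
			return -ENOMEM;

		ret = ulist_add(ulist, start, 0, GFP_NOFS);
		if (ret < 0)
			goto out;

		/* value already present: 0 is returned and old_aux is filled in */
		ret = ulist_add_merge(ulist, start, 42, &old_aux, GFP_NOFS);
		if (ret < 0)
			goto out;

		ULIST_ITER_INIT(&uiter);
		while ((node = ulist_next(ulist, &uiter)) != NULL)
			pr_debug("val=%llu aux=%lu\n",
				 (unsigned long long)node->val, node->aux);
		ret = 0;
	out:
		ulist_free(ulist);
		return ret;
	}
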
index 1411b99555a4c1f138a6a3bf699842849d2b3e08..ecaad40e7ef499fedb7667659d9efd955a661eaa 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
 #include "volumes.h"
 #include "async-thread.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
 static DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
@@ -61,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
        }
        kfree(fs_devices);
@@ -331,8 +335,8 @@ static noinline int device_list_add(const char *path,
 {
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
+       struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);
-       char *name;
 
        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
@@ -361,15 +365,18 @@ static noinline int device_list_add(const char *path,
                        return -ENOMEM;
                }
                device->devid = devid;
+               device->dev_stats_valid = 0;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);
-               device->name = kstrdup(path, GFP_NOFS);
-               if (!device->name) {
+
+               name = rcu_string_strdup(path, GFP_NOFS);
+               if (!name) {
                        kfree(device);
                        return -ENOMEM;
                }
+               rcu_assign_pointer(device->name, name);
                INIT_LIST_HEAD(&device->dev_alloc_list);
 
                /* init readahead state */
@@ -386,12 +393,12 @@ static noinline int device_list_add(const char *path,
 
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
-       } else if (!device->name || strcmp(device->name, path)) {
-               name = kstrdup(path, GFP_NOFS);
+       } else if (!device->name || strcmp(device->name->str, path)) {
+               name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
-               kfree(device->name);
-               device->name = name;
+               rcu_string_free(device->name);
+               rcu_assign_pointer(device->name, name);
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
@@ -426,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 
        /* We have held the volume lock, it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+               struct rcu_string *name;
+
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;
 
-               device->name = kstrdup(orig_dev->name, GFP_NOFS);
-               if (!device->name) {
+               /*
+                * This is ok to do without the rcu read lock held because we hold
+                * the uuid mutex, so nothing we touch in here is going to disappear.
+                */
+               name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+               if (!name) {
                        kfree(device);
                        goto error;
                }
+               rcu_assign_pointer(device->name, name);
 
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
@@ -487,7 +501,7 @@ again:
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
        }
 
@@ -512,7 +526,7 @@ static void __free_device(struct work_struct *work)
        if (device->bdev)
                blkdev_put(device->bdev, device->mode);
 
-       kfree(device->name);
+       rcu_string_free(device->name);
        kfree(device);
 }
 
@@ -536,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;
+               struct rcu_string *name;
 
                if (device->bdev)
                        fs_devices->open_devices--;
@@ -551,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device); /* -ENOMEM */
                memcpy(new_device, device, sizeof(*new_device));
-               new_device->name = kstrdup(device->name, GFP_NOFS);
-               BUG_ON(device->name && !new_device->name); /* -ENOMEM */
+
+               /* Safe because we are under uuid_mutex */
+               name = rcu_string_strdup(device->name->str, GFP_NOFS);
+               BUG_ON(device->name && !name); /* -ENOMEM */
+               rcu_assign_pointer(new_device->name, name);
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
@@ -617,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                if (!device->name)
                        continue;
 
-               bdev = blkdev_get_by_path(device->name, flags, holder);
+               bdev = blkdev_get_by_path(device->name->str, flags, holder);
                if (IS_ERR(bdev)) {
-                       printk(KERN_INFO "open %s failed\n", device->name);
+                       printk(KERN_INFO "open %s failed\n", device->name->str);
                        goto error;
                }
                filemap_write_and_wait(bdev->bd_inode->i_mapping);
@@ -1628,12 +1646,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        struct block_device *bdev;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
+       struct rcu_string *name;
        u64 total_bytes;
        int seeding_dev = 0;
        int ret = 0;
 
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
-               return -EINVAL;
+               return -EROFS;
 
        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
@@ -1667,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                goto error;
        }
 
-       device->name = kstrdup(device_path, GFP_NOFS);
-       if (!device->name) {
+       name = rcu_string_strdup(device_path, GFP_NOFS);
+       if (!name) {
                kfree(device);
                ret = -ENOMEM;
                goto error;
        }
+       rcu_assign_pointer(device->name, name);
 
        ret = find_next_devid(root, &device->devid);
        if (ret) {
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
                goto error;
        }
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
@@ -1792,7 +1812,7 @@ error_trans:
        unlock_chunks(root);
        btrfs_abort_transaction(trans, root, ret);
        btrfs_end_transaction(trans, root);
-       kfree(device->name);
+       rcu_string_free(device->name);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2825,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-       struct btrfs_balance_control *bctl =
-                       (struct btrfs_balance_control *)data;
-       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
-       set_balance_control(bctl);
-
-       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-               printk(KERN_INFO "btrfs: force skipping balance\n");
-       } else {
+       if (fs_info->balance_ctl) {
                printk(KERN_INFO "btrfs: continuing balance\n");
-               ret = btrfs_balance(bctl, NULL);
+               ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+
        return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
        struct task_struct *tsk;
+
+       spin_lock(&fs_info->balance_lock);
+       if (!fs_info->balance_ctl) {
+               spin_unlock(&fs_info->balance_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->balance_lock);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+               return 0;
+       }
+
+       tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
        struct btrfs_balance_control *bctl;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk_bargs;
@@ -2862,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        if (!path)
                return -ENOMEM;
 
-       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-       if (!bctl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;
 
-       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret < 0)
-               goto out_bctl;
+               goto out;
        if (ret > 0) { /* ret = -ENOENT; */
                ret = 0;
-               goto out_bctl;
+               goto out;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
        }
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-       bctl->fs_info = tree_root->fs_info;
-       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+       bctl->fs_info = fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item);
+       bctl->flags |= BTRFS_BALANCE_RESUME;
 
        btrfs_balance_data(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2893,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-       if (IS_ERR(tsk))
-               ret = PTR_ERR(tsk);
-       else
-               goto out;
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-       kfree(bctl);
+       set_balance_control(bctl);
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
 out:
        btrfs_free_path(path);
        return ret;
@@ -4001,13 +4038,60 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
+static void *merge_stripe_index_into_bio_private(void *bi_private,
+                                                unsigned int stripe_index)
+{
+       /*
+        * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
+        * at most 1.
+        * The alternative solution (instead of stealing bits from the
+        * pointer) would be to allocate an intermediate structure
+        * that contains the old private pointer plus the stripe_index.
+        */
+       BUG_ON((((uintptr_t)bi_private) & 3) != 0);
+       BUG_ON(stripe_index > 3);
+       return (void *)(((uintptr_t)bi_private) | stripe_index);
+}
+
+static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
+{
+       return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
+}
+
+static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
+{
+       return (unsigned int)((uintptr_t)bi_private) & 3;
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-       struct btrfs_bio *bbio = bio->bi_private;
+       struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
        int is_orig_bio = 0;
 
-       if (err)
+       if (err) {
                atomic_inc(&bbio->error);
+               if (err == -EIO || err == -EREMOTEIO) {
+                       unsigned int stripe_index =
+                               extract_stripe_index_from_bio_private(
+                                       bio->bi_private);
+                       struct btrfs_device *dev;
+
+                       BUG_ON(stripe_index >= bbio->num_stripes);
+                       dev = bbio->stripes[stripe_index].dev;
+                       if (dev->bdev) {
+                               if (bio->bi_rw & WRITE)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_WRITE_ERRS);
+                               else
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_READ_ERRS);
+                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_FLUSH_ERRS);
+                               btrfs_dev_stat_print_on_error(dev);
+                       }
+               }
+       }
 
        if (bio == bbio->orig_bio)
                is_orig_bio = 1;
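
Because a struct btrfs_bio is at least four-byte aligned, the two low bits of
bi_private are free to carry the stripe index, and btrfs_end_bio() above uses that
index to charge an I/O error to the right device. A standalone round trip of the same
pointer-tagging trick in plain C; the names below are illustrative, not the btrfs
helpers:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* pack a 2-bit index into the low bits of a 4-byte-aligned pointer */
	static void *pack(void *ptr, unsigned int index)
	{
		assert(((uintptr_t)ptr & 3) == 0);
		assert(index <= 3);
		return (void *)((uintptr_t)ptr | index);
	}

	static void *unpack_ptr(void *tagged)
	{
		return (void *)((uintptr_t)tagged & ~(uintptr_t)3);
	}

	static unsigned int unpack_index(void *tagged)
	{
		return (unsigned int)((uintptr_t)tagged & 3);
	}

	int main(void)
	{
		static int object;	/* stands in for the struct btrfs_bio */
		void *tagged = pack(&object, 2);

		assert(unpack_ptr(tagged) == &object);
		assert(unpack_index(tagged) == 2);
		printf("round trip ok\n");
		return 0;
	}
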
@@ -4149,14 +4233,23 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        bio = first_bio;
                }
                bio->bi_private = bbio;
+               bio->bi_private = merge_stripe_index_into_bio_private(
+                               bio->bi_private, (unsigned int)dev_nr);
                bio->bi_end_io = btrfs_end_bio;
                bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
                dev = bbio->stripes[dev_nr].dev;
                if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
+#ifdef DEBUG
+                       struct rcu_string *name;
+
+                       rcu_read_lock();
+                       name = rcu_dereference(dev->name);
                        pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
                                 "(%s id %llu), size=%u\n", rw,
                                 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
-                                dev->name, dev->devid, bio->bi_size);
+                                name->str, dev->devid, bio->bi_size);
+                       rcu_read_unlock();
+#endif
                        bio->bi_bdev = dev->bdev;
                        if (async_submit)
                                schedule_bio(root, dev, rw, bio);
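
The DEBUG branch above shows the reader side of the new rcu_string device name: take
the RCU read lock, dereference the pointer, use ->str, unlock. A compact sketch of both
sides of that pattern, assuming the rcu-string helpers introduced alongside this
series; the two functions below are illustrative, modelled on device_list_add():

	static void example_rename_device(struct btrfs_device *device,
					  const char *path)
	{
		struct rcu_string *name;

		/* writer: publish a new string, let the helper defer the old free */
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
	}

	static void example_print_device(struct btrfs_device *device)
	{
		/* reader: the string stays valid for the whole read-side section */
		rcu_read_lock();
		pr_info("device path: %s\n", rcu_str_deref(device->name));
		rcu_read_unlock();
	}
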
@@ -4509,6 +4602,28 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        return ret;
 }
 
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+                                                  u64 logical, int mirror_num)
+{
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       int ret;
+       u64 map_length = 0;
+       struct btrfs_bio *bbio = NULL;
+       struct btrfs_device *device;
+
+       BUG_ON(mirror_num == 0);
+       ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
+                             mirror_num);
+       if (ret) {
+               BUG_ON(bbio != NULL);
+               return NULL;
+       }
+       BUG_ON(mirror_num != bbio->mirror_num);
+       device = bbio->stripes[mirror_num - 1].dev;
+       kfree(bbio);
+       return device;
+}
+
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
        struct btrfs_path *path;
@@ -4583,3 +4698,231 @@ error:
        btrfs_free_path(path);
        return ret;
 }
+
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
+{
+       int i;
+
+       for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+               btrfs_dev_stat_reset(dev, i);
+}
+
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_key key;
+       struct btrfs_key found_key;
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct extent_buffer *eb;
+       int slot;
+       int ret = 0;
+       struct btrfs_device *device;
+       struct btrfs_path *path = NULL;
+       int i;
+
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               int item_size;
+               struct btrfs_dev_stats_item *ptr;
+
+               key.objectid = 0;
+               key.type = BTRFS_DEV_STATS_KEY;
+               key.offset = device->devid;
+               ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+               if (ret) {
+                       printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
+                                     rcu_str_deref(device->name),
+                                     (unsigned long long)device->devid);
+                       __btrfs_reset_dev_stats(device);
+                       device->dev_stats_valid = 1;
+                       btrfs_release_path(path);
+                       continue;
+               }
+               slot = path->slots[0];
+               eb = path->nodes[0];
+               btrfs_item_key_to_cpu(eb, &found_key, slot);
+               item_size = btrfs_item_size_nr(eb, slot);
+
+               ptr = btrfs_item_ptr(eb, slot,
+                                    struct btrfs_dev_stats_item);
+
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+                       if (item_size >= (1 + i) * sizeof(__le64))
+                               btrfs_dev_stat_set(device, i,
+                                       btrfs_dev_stats_value(eb, ptr, i));
+                       else
+                               btrfs_dev_stat_reset(device, i);
+               }
+
+               device->dev_stats_valid = 1;
+               btrfs_dev_stat_print_on_load(device);
+               btrfs_release_path(path);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+out:
+       btrfs_free_path(path);
+       return ret < 0 ? ret : 0;
+}
+
+static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *dev_root,
+                               struct btrfs_device *device)
+{
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       struct extent_buffer *eb;
+       struct btrfs_dev_stats_item *ptr;
+       int ret;
+       int i;
+
+       key.objectid = 0;
+       key.type = BTRFS_DEV_STATS_KEY;
+       key.offset = device->devid;
+
+       path = btrfs_alloc_path();
+       BUG_ON(!path);
+       ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
+       if (ret < 0) {
+               printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
+                             ret, rcu_str_deref(device->name));
+               goto out;
+       }
+
+       if (ret == 0 &&
+           btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+               /* need to delete old one and insert a new one */
+               ret = btrfs_del_item(trans, dev_root, path);
+               if (ret != 0) {
+                       printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
+                                     rcu_str_deref(device->name), ret);
+                       goto out;
+               }
+               ret = 1;
+       }
+
+       if (ret == 1) {
+               /* need to insert a new item */
+               btrfs_release_path(path);
+               ret = btrfs_insert_empty_item(trans, dev_root, path,
+                                             &key, sizeof(*ptr));
+               if (ret < 0) {
+                       printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
+                                     rcu_str_deref(device->name), ret);
+                       goto out;
+               }
+       }
+
+       eb = path->nodes[0];
+       ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
+       for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+               btrfs_set_dev_stats_value(eb, ptr, i,
+                                         btrfs_dev_stat_read(device, i));
+       btrfs_mark_buffer_dirty(eb);
+
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * called from commit_transaction. Writes all changed device stats to disk.
+ */
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+                       struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct btrfs_device *device;
+       int ret = 0;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (!device->dev_stats_valid || !device->dev_stats_dirty)
+                       continue;
+
+               ret = update_dev_stat_item(trans, dev_root, device);
+               if (!ret)
+                       device->dev_stats_dirty = 0;
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       return ret;
+}
+
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
+{
+       btrfs_dev_stat_inc(dev, index);
+       btrfs_dev_stat_print_on_error(dev);
+}
+
+void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+{
+       if (!dev->dev_stats_valid)
+               return;
+       printk_ratelimited_in_rcu(KERN_ERR
+                          "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+                          rcu_str_deref(dev->name),
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+                          btrfs_dev_stat_read(dev,
+                                              BTRFS_DEV_STAT_CORRUPTION_ERRS),
+                          btrfs_dev_stat_read(dev,
+                                              BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
+{
+       printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+              rcu_str_deref(dev->name),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+int btrfs_get_dev_stats(struct btrfs_root *root,
+                       struct btrfs_ioctl_get_dev_stats *stats,
+                       int reset_after_read)
+{
+       struct btrfs_device *dev;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       int i;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, stats->devid, NULL, NULL);
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       if (!dev) {
+               printk(KERN_WARNING
+                      "btrfs: get dev_stats failed, device not found\n");
+               return -ENODEV;
+       } else if (!dev->dev_stats_valid) {
+               printk(KERN_WARNING
+                      "btrfs: get dev_stats failed, not yet valid\n");
+               return -ENODEV;
+       } else if (reset_after_read) {
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+                       if (stats->nr_items > i)
+                               stats->values[i] =
+                                       btrfs_dev_stat_read_and_reset(dev, i);
+                       else
+                               btrfs_dev_stat_reset(dev, i);
+               }
+       } else {
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+                       if (stats->nr_items > i)
+                               stats->values[i] = btrfs_dev_stat_read(dev, i);
+       }
+       if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
+               stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
+       return 0;
+}
index bb6b03f97aaa089793d667fae93335373773a7eb..95f6637614db5932f8d52daba79943514a7ed8da 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bio.h>
 #include <linux/sort.h>
 #include "async-thread.h"
+#include "ioctl.h"
 
 #define BTRFS_STRIPE_LEN       (64 * 1024)
 
@@ -57,7 +58,7 @@ struct btrfs_device {
        /* the mode sent to blkdev_get */
        fmode_t mode;
 
-       char *name;
+       struct rcu_string *name;
 
        /* the internal btrfs device id */
        u64 devid;
@@ -106,6 +107,11 @@ struct btrfs_device {
        struct completion flush_wait;
        int nobarriers;
 
+       /* disk I/O failure stats. For detailed description refer to
+        * enum btrfs_dev_stat_values in ioctl.h */
+       int dev_stats_valid;
+       int dev_stats_dirty; /* counters need to be written to disk */
+       atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
 };
 
 struct btrfs_fs_devices {
@@ -275,10 +281,57 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+                                                  u64 logical, int mirror_num);
+void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
+int btrfs_get_dev_stats(struct btrfs_root *root,
+                       struct btrfs_ioctl_get_dev_stats *stats,
+                       int reset_after_read);
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+                       struct btrfs_fs_info *fs_info);
+
+static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+                                     int index)
+{
+       atomic_inc(dev->dev_stat_values + index);
+       dev->dev_stats_dirty = 1;
+}
+
+static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+                                     int index)
+{
+       return atomic_read(dev->dev_stat_values + index);
+}
+
+static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+                                               int index)
+{
+       int ret;
+
+       ret = atomic_xchg(dev->dev_stat_values + index, 0);
+       dev->dev_stats_dirty = 1;
+       return ret;
+}
+
+static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+                                     int index, unsigned long val)
+{
+       atomic_set(dev->dev_stat_values + index, val);
+       dev->dev_stats_dirty = 1;
+}
+
+static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
+                                       int index)
+{
+       btrfs_dev_stat_set(dev, index, 0);
+}
 #endif
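
The inline helpers above keep the counters as plain atomics plus a dirty flag, so the
I/O completion path never takes a lock and btrfs_run_dev_stats() only persists devices
whose counters actually changed. A small userspace model of the read-and-reset
semantics using C11 atomics; the struct and function names are illustrative, not btrfs
symbols:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NSTATS 5

	struct demo_device {
		atomic_int stat_values[NSTATS];
		int stats_dirty;
	};

	static void stat_inc(struct demo_device *dev, int index)
	{
		atomic_fetch_add(&dev->stat_values[index], 1);
		dev->stats_dirty = 1;		/* tell the commit path to persist */
	}

	static int stat_read_and_reset(struct demo_device *dev, int index)
	{
		/* like atomic_xchg(): hand back the old value, leave zero behind */
		int old = atomic_exchange(&dev->stat_values[index], 0);

		dev->stats_dirty = 1;
		return old;
	}

	int main(void)
	{
		static struct demo_device dev;

		stat_inc(&dev, 0);
		stat_inc(&dev, 0);
		printf("write errs: %d\n", stat_read_and_reset(&dev, 0));	/* 2 */
		printf("after reset: %d\n", atomic_load(&dev.stat_values[0]));	/* 0 */
		return 0;
	}
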
index e7a5659087e66f93769bc750562d21294c9bd2b6..3f4e2d69e83a13cb66f3f3a56024f53f5299f5c4 100644 (file)
@@ -196,6 +196,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
        if (ret)
                goto out;
 
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
index ad5938ca357c270ace08388401176f22a6343571..c7062c896d7c20489f41ea838640a5cfa5387ca9 100644 (file)
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       int ret;
+       struct buffer_head *bh;
+
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-       for (;;) {
-               struct buffer_head * bh;
-               int ret;
+retry:
+       bh = __find_get_block(bdev, block, size);
+       if (bh)
+               return bh;
 
+       ret = grow_buffers(bdev, block, size);
+       if (ret == 0) {
+               free_more_memory();
+               goto retry;
+       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
-
-               ret = grow_buffers(bdev, block, size);
-               if (ret < 0)
-                       return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
+       return NULL;
 }
 
 /*
@@ -3152,7 +3156,7 @@ SYSCALL_DEFINE2(bdflush, int, func, long, data)
 /*
  * Buffer-head allocation
  */
-static struct kmem_cache *bh_cachep;
+static struct kmem_cache *bh_cachep __read_mostly;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
index 173b1d22e59b5a4bf8ed714f72b233cb70537468..8b67304e4b8079efe80eecd716fb1e98b5486842 100644 (file)
        (CONGESTION_ON_THRESH(congestion_kb) -                          \
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
-
+static inline struct ceph_snap_context *page_snap_context(struct page *page)
+{
+       if (PagePrivate(page))
+               return (void *)page->private;
+       return NULL;
+}
 
 /*
  * Dirty a page.  Optimistically adjust accounting, on the assumption
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
-       struct ceph_snap_context *snapc = (void *)page->private;
+       struct ceph_snap_context *snapc = page_snap_context(page);
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(!page->private);
        BUG_ON(!PagePrivate(page));
        BUG_ON(!page->mapping);
 
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g)
        struct inode *inode = page->mapping ? page->mapping->host : NULL;
        dout("%p releasepage %p idx %lu\n", inode, page, page->index);
        WARN_ON(PageDirty(page));
-       WARN_ON(page->private);
        WARN_ON(PagePrivate(page));
        return 0;
 }
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        osdc = &fsc->client->osdc;
 
        /* verify this is a writeable snap context */
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc == NULL) {
                dout("writepage %p page %p not dirty?\n", inode, page);
                goto out;
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        oldest = get_oldest_context(inode, &snap_size);
        if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
-                    inode, page, (void *)page->private);
+                    inode, page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON((current->flags & PF_MEMALLOC) == 0);
                ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req,
                        clear_bdi_congested(&fsc->backing_dev_info,
                                            BLK_RW_ASYNC);
 
-               ceph_put_snap_context((void *)page->private);
+               ceph_put_snap_context(page_snap_context(page));
                page->private = 0;
                ClearPagePrivate(page);
                dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@ get_more_pages:
                        }
 
                        /* only if matching snap context */
-                       pgsnapc = (void *)page->private;
+                       pgsnapc = page_snap_context(page);
                        if (pgsnapc->seq > snapc->seq) {
                                dout("page snapc %p %lld > oldest %p %lld\n",
                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@ retry_locked:
        BUG_ON(!ci->i_snap_realm);
        down_read(&mdsc->snap_rwsem);
        BUG_ON(!ci->i_snap_realm->cached_context);
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc && snapc != ci->i_head_snapc) {
                /*
                 * this page is already dirty in another (older) snap
index fbb2a643ef10a1f75c4918f165c9e3a22a603a86..8e1b60e557b65bea0df86a881376456658a9cffd 100644 (file)
@@ -40,38 +40,49 @@ struct ceph_nfs_confh {
        u32 parent_name_hash;
 } __attribute__ ((packed));
 
-static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
-                         int connectable)
+/*
+ * The presence of @parent_inode here tells us whether NFS wants a
+ * connectable file handle.  However, we want to make a connectable
+ * file handle unconditionally so that the MDS gets as much of a hint
+ * as possible.  That means we only use @parent_inode to indicate
+ * whether nfsd wants a connectable fh, and whether we should indicate
+ * failure from a too-small @max_len.
+ */
+static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+                         struct inode *parent_inode)
 {
        int type;
        struct ceph_nfs_fh *fh = (void *)rawfh;
        struct ceph_nfs_confh *cfh = (void *)rawfh;
-       struct dentry *parent;
-       struct inode *inode = dentry->d_inode;
        int connected_handle_length = sizeof(*cfh)/4;
        int handle_length = sizeof(*fh)/4;
+       struct dentry *dentry = d_find_alias(inode);
+       struct dentry *parent;
 
        /* don't re-export snaps */
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EINVAL;
 
-       spin_lock(&dentry->d_lock);
-       parent = dentry->d_parent;
-       if (*max_len >= connected_handle_length) {
+       /* if we found an alias, generate a connectable fh */
+       if (*max_len >= connected_handle_length && dentry) {
                dout("encode_fh %p connectable\n", dentry);
-               cfh->ino = ceph_ino(dentry->d_inode);
+               spin_lock(&dentry->d_lock);
+               parent = dentry->d_parent;
+               cfh->ino = ceph_ino(inode);
                cfh->parent_ino = ceph_ino(parent->d_inode);
                cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode,
                                                         dentry);
                *max_len = connected_handle_length;
                type = 2;
+               spin_unlock(&dentry->d_lock);
        } else if (*max_len >= handle_length) {
-               if (connectable) {
+               if (parent_inode) {
+                       /* nfsd wants connectable */
                        *max_len = connected_handle_length;
                        type = 255;
                } else {
                        dout("encode_fh %p\n", dentry);
-                       fh->ino = ceph_ino(dentry->d_inode);
+                       fh->ino = ceph_ino(inode);
                        *max_len = handle_length;
                        type = 1;
                }
@@ -79,7 +90,6 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
                *max_len = handle_length;
                type = 255;
        }
-       spin_unlock(&dentry->d_lock);
        return type;
 }
 
index ed72428d9c75c80a6744ccd6a996b83c1a20d333..988d4f302e4880281a2b5e04c9f44dd7870202d2 100644 (file)
@@ -54,7 +54,6 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
-       req->r_args.open.preferred = cpu_to_le32(-1);
 out:
        return req;
 }
index 790914a598dd5d68b8f40b851c2faff2e790e4af..8e3fb69fbe62e60cd3698c07d9fa8e53d6d2b184 100644 (file)
@@ -26,8 +26,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
                l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
                l.object_size = ceph_file_layout_object_size(ci->i_layout);
                l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-               l.preferred_osd =
-                       (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+               l.preferred_osd = (s32)-1;
                if (copy_to_user(arg, &l, sizeof(l)))
                        return -EFAULT;
        }
@@ -35,6 +34,32 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
        return err;
 }
 
+static long __validate_layout(struct ceph_mds_client *mdsc,
+                             struct ceph_ioctl_layout *l)
+{
+       int i, err;
+
+       /* validate striping parameters */
+       if ((l->object_size & ~PAGE_MASK) ||
+           (l->stripe_unit & ~PAGE_MASK) ||
+           ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+               return -EINVAL;
+
+       /* make sure it's a valid data pool */
+       mutex_lock(&mdsc->mutex);
+       err = -EINVAL;
+       for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+               if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
+                       err = 0;
+                       break;
+               }
+       mutex_unlock(&mdsc->mutex);
+       if (err)
+               return err;
+
+       return 0;
+}
+
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
        struct inode *inode = file->f_dentry->d_inode;
@@ -44,52 +69,40 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
        struct ceph_ioctl_layout l;
        struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
        struct ceph_ioctl_layout nl;
-       int err, i;
+       int err;
 
        if (copy_from_user(&l, arg, sizeof(l)))
                return -EFAULT;
 
        /* validate changed params against current layout */
        err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
-       if (!err) {
-               nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
-               nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
-               nl.object_size = ceph_file_layout_object_size(ci->i_layout);
-               nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-               nl.preferred_osd =
-                               (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
-       } else
+       if (err)
                return err;
 
+       memset(&nl, 0, sizeof(nl));
        if (l.stripe_count)
                nl.stripe_count = l.stripe_count;
+       else
+               nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        if (l.stripe_unit)
                nl.stripe_unit = l.stripe_unit;
+       else
+               nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
        if (l.object_size)
                nl.object_size = l.object_size;
+       else
+               nl.object_size = ceph_file_layout_object_size(ci->i_layout);
        if (l.data_pool)
                nl.data_pool = l.data_pool;
-       if (l.preferred_osd)
-               nl.preferred_osd = l.preferred_osd;
+       else
+               nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
 
-       if ((nl.object_size & ~PAGE_MASK) ||
-           (nl.stripe_unit & ~PAGE_MASK) ||
-           ((unsigned)nl.object_size % (unsigned)nl.stripe_unit))
-               return -EINVAL;
+       /* this is obsolete, and always -1 */
+       nl.preferred_osd = le64_to_cpu(-1);
 
-       /* make sure it's a valid data pool */
-       if (l.data_pool > 0) {
-               mutex_lock(&mdsc->mutex);
-               err = -EINVAL;
-               for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-                       if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-                               err = 0;
-                               break;
-                       }
-               mutex_unlock(&mdsc->mutex);
-               if (err)
-                       return err;
-       }
+       err = __validate_layout(mdsc, &nl);
+       if (err)
+               return err;
 
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
                                       USE_AUTH_MDS);
@@ -106,8 +119,6 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
        req->r_args.setlayout.layout.fl_object_size =
                cpu_to_le32(l.object_size);
        req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
-       req->r_args.setlayout.layout.fl_pg_preferred =
-               cpu_to_le32(l.preferred_osd);
 
        parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
@@ -127,33 +138,16 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
-       int err, i;
+       int err;
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 
        /* copy and validate */
        if (copy_from_user(&l, arg, sizeof(l)))
                return -EFAULT;
 
-       if ((l.object_size & ~PAGE_MASK) ||
-           (l.stripe_unit & ~PAGE_MASK) ||
-           !l.stripe_unit ||
-           (l.object_size &&
-               (unsigned)l.object_size % (unsigned)l.stripe_unit))
-               return -EINVAL;
-
-       /* make sure it's a valid data pool */
-       if (l.data_pool > 0) {
-               mutex_lock(&mdsc->mutex);
-               err = -EINVAL;
-               for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-                       if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-                               err = 0;
-                               break;
-                       }
-               mutex_unlock(&mdsc->mutex);
-               if (err)
-                       return err;
-       }
+       err = __validate_layout(mdsc, &l);
+       if (err)
+               return err;
 
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
                                       USE_AUTH_MDS);
@@ -171,8 +165,6 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
                        cpu_to_le32(l.object_size);
        req->r_args.setlayout.layout.fl_pg_pool =
                        cpu_to_le32(l.data_pool);
-       req->r_args.setlayout.layout.fl_pg_preferred =
-                       cpu_to_le32(l.preferred_osd);
 
        err = ceph_mdsc_do_request(mdsc, inode, req);
        ceph_mdsc_put_request(req);
index be4a604873331dc7c547950650a899b514f56d26..c77028afb1e1e6b52315a73d013a26fb5c9c7f5b 100644 (file)
@@ -34,6 +34,8 @@
 struct ceph_ioctl_layout {
        __u64 stripe_unit, stripe_count, object_size;
        __u64 data_pool;
+
+       /* obsolete.  new values ignored, always return -1 */
        __s64 preferred_osd;
 };
 
index 89971e137aab80454fed8a51a105d7df903b3101..200bc87eceb1cc417a1caa1a73e264cde79dda78 100644 (file)
@@ -334,10 +334,10 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
        dout("mdsc put_session %p %d -> %d\n", s,
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
-               if (s->s_authorizer)
+               if (s->s_auth.authorizer)
                     s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
                             s->s_mdsc->fsc->client->monc.auth,
-                            s->s_authorizer);
+                            s->s_auth.authorizer);
                kfree(s);
        }
 }
@@ -3395,39 +3395,33 @@ out:
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-                         void **buf, int *len, int *proto,
-                         void **reply_buf, int *reply_len, int force_new)
+
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+                                       int *proto, int force_new)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
-       int ret = 0;
-
-       if (force_new && s->s_authorizer) {
-               ac->ops->destroy_authorizer(ac, s->s_authorizer);
-               s->s_authorizer = NULL;
-       }
-       if (s->s_authorizer == NULL) {
-               if (ac->ops->create_authorizer) {
-                       ret = ac->ops->create_authorizer(
-                               ac, CEPH_ENTITY_TYPE_MDS,
-                               &s->s_authorizer,
-                               &s->s_authorizer_buf,
-                               &s->s_authorizer_buf_len,
-                               &s->s_authorizer_reply_buf,
-                               &s->s_authorizer_reply_buf_len);
-                       if (ret)
-                               return ret;
-               }
-       }
+       struct ceph_auth_handshake *auth = &s->s_auth;
 
+       if (force_new && auth->authorizer) {
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, auth->authorizer);
+               auth->authorizer = NULL;
+       }
+       if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+               int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
+                                                       auth);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
        *proto = ac->protocol;
-       *buf = s->s_authorizer_buf;
-       *len = s->s_authorizer_buf_len;
-       *reply_buf = s->s_authorizer_reply_buf;
-       *reply_len = s->s_authorizer_reply_buf_len;
-       return 0;
+
+       return auth;
 }
 
 
@@ -3437,7 +3431,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
-       return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+       return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
index 8c7c04ebb595a1a8bd2e9c1b177890f8ba234b9a..dd26846dd71de4267146b7deb43c7a5d7b9b976f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/ceph/types.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/mdsmap.h>
+#include <linux/ceph/auth.h>
 
 /*
  * Some lock dependencies:
@@ -113,9 +114,7 @@ struct ceph_mds_session {
 
        struct ceph_connection s_con;
 
-       struct ceph_authorizer *s_authorizer;
-       void             *s_authorizer_buf, *s_authorizer_reply_buf;
-       size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;
+       struct ceph_auth_handshake s_auth;
 
        /* protected by s_gen_ttl_lock */
        spinlock_t        s_gen_ttl_lock;
index f04c0961f9937eb6f553978f10942800818528ba..e5206fc765620f3b550c2457e1f271cb9d81c8c8 100644 (file)
@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
 
        /* alloc new snap context */
        err = -ENOMEM;
-       if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
+       if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
                goto fail;
        snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
        if (!snapc)
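
The ULONG_MAX to SIZE_MAX change matters because the allocation size is computed in
size_t: the guard has to reject any snapshot count whose total byte count would wrap
before it reaches kzalloc(). A minimal userspace sketch of the same guard for a
flexible-array allocation; the struct and function names are placeholders:

	#include <stdint.h>
	#include <stdlib.h>

	struct snap_ctx {		/* stand-in for ceph_snap_context */
		uint64_t seq;
		uint32_t num_snaps;
		uint64_t snaps[];	/* flexible array member */
	};

	static struct snap_ctx *snap_ctx_alloc(size_t num)
	{
		/* refuse counts whose total size would overflow size_t */
		if (num > (SIZE_MAX - sizeof(struct snap_ctx)) / sizeof(uint64_t))
			return NULL;
		return calloc(1, sizeof(struct snap_ctx) + num * sizeof(uint64_t));
	}
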
index 35b86331d8a5ce84c311e9eb2730757f80149179..785cb3057c95a436c344da2d76ee24b86cb7954b 100644 (file)
@@ -118,15 +118,6 @@ static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-
-       if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
-               val += ret;
-               size -= ret;
-               ret += snprintf(val, size, "preferred_osd=%lld\n",
-                           (unsigned long long)ceph_file_layout_pg_preferred(
-                                   ci->i_layout));
-       }
-
        return ret;
 }
 
index 20350a93ed99105062743e1123e441af52b59a39..6df0cbe1cbc90bc1fd7e978172ed7f05f36e3cbe 100644 (file)
@@ -174,6 +174,7 @@ struct smb_version_operations {
        void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
        void (*set_credits)(struct TCP_Server_Info *, const int);
        int * (*get_credits_field)(struct TCP_Server_Info *);
+       __u64 (*get_next_mid)(struct TCP_Server_Info *);
        /* data offset from read response message */
        unsigned int (*read_data_offset)(char *);
        /* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+       return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
index 5ec21ecf7980e98a2d51ed9d2edf41c720952d4d..0a6cbfe2761ee5c62d7e31158a28fab7df63c1f0 100644 (file)
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
                                void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
                             const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
index b5ad716b2642138ebebdc18a975718f9a09855c8..4ee522b3f66f0036737a688b0c97606ee1efe2ce 100644 (file)
@@ -86,7 +86,31 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */
 
-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+       mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+       mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -268,7 +292,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
                return rc;
 
        buffer = (struct smb_hdr *)*request_buf;
-       buffer->Mid = GetNextMid(ses->server);
+       buffer->Mid = get_next_mid(ses->server);
        if (ses->capabilities & CAP_UNICODE)
                buffer->Flags2 |= SMBFLG2_UNICODE;
        if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +426,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 
        cFYI(1, "secFlags 0x%x", secFlags);
 
-       pSMB->hdr.Mid = GetNextMid(server);
+       pSMB->hdr.Mid = get_next_mid(server);
        pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
        if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +806,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
                return rc;
        }
 
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        if (ses->server->sec_mode &
                   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* marshal up the page array */
+       cifs_kmap_lock();
        len = rdata->marshal_iov(rdata, data_len);
+       cifs_kmap_unlock();
        data_len -= len;
 
        /* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
         * and set the iov_len properly for each one. It may also set
         * wdata->bytes too.
         */
+       cifs_kmap_lock();
        wdata->marshal_iov(iov, wdata);
+       cifs_kmap_unlock();
 
        cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
@@ -4762,7 +4790,7 @@ getDFSRetry:
 
        /* server pointer checked in called function,
        but should never be null here anyway */
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
        pSMB->hdr.Tid = ses->ipc_tid;
        pSMB->hdr.Uid = ses->Suid;
        if (ses->capabilities & CAP_STATUS32)
index ccafdedd0dbc4df14e04c0087d28b1dfa453be1f..94b7788c3189281e043ecf82b9bf4fb1e9d0e0f7 100644 (file)
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
                if (mid_entry != NULL) {
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
-               } else if (!server->ops->is_oplock_break(buf, server)) {
+               } else if (!server->ops->is_oplock_break ||
+                          !server->ops->is_oplock_break(buf, server)) {
                        cERROR(1, "No task to wake, unknown frame received! "
                                   "NumMids %d", atomic_read(&midCount));
                        cifs_dump_mem("Received Data is: ", buf,
                                      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-                       server->ops->dump_detail(buf);
+                       if (server->ops->dump_detail)
+                               server->ops->dump_detail(buf);
                        cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -1651,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                         * If yes, we have encountered a double deliminator
                         * reset the NULL character to the deliminator
                         */
-                       if (tmp_end < end && tmp_end[1] == delim)
+                       if (tmp_end < end && tmp_end[1] == delim) {
                                tmp_end[0] = delim;
 
-                       /* Keep iterating until we get to a single deliminator
-                        * OR the end
-                        */
-                       while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
-                              (tmp_end[1] == delim)) {
-                               tmp_end = (char *) &tmp_end[2];
-                       }
+                               /* Keep iterating until we get to a single
+                                * deliminator OR the end
+                                */
+                               while ((tmp_end = strchr(tmp_end, delim))
+                                       != NULL && (tmp_end[1] == delim)) {
+                                               tmp_end = (char *) &tmp_end[2];
+                               }
 
-                       /* Reset var options to point to next element */
-                       if (tmp_end) {
-                               tmp_end[0] = '\0';
-                               options = (char *) &tmp_end[1];
-                       } else
-                               /* Reached the end of the mount option string */
-                               options = end;
+                               /* Reset var options to point to next element */
+                               if (tmp_end) {
+                                       tmp_end[0] = '\0';
+                                       options = (char *) &tmp_end[1];
+                               } else
+                                       /* Reached the end of the mount option
+                                        * string */
+                                       options = end;
+                       }
 
                        /* Now build new password string */
                        temp_len = strlen(value);
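
The restructured password-parsing block above only walks the escape sequence when a doubled delimiter was actually seen, instead of unconditionally re-scanning. The convention it implements is that a doubled delimiter inside the pass= value stands for one literal delimiter character; a plain userspace illustration of that convention (made-up helper, not the CIFS parser itself):

#include <stdio.h>
#include <string.h>

/* Collapse doubled delimiters (",," -> ",") in place; return new length. */
static size_t unescape_delim(char *s, char delim)
{
	char *src = s, *dst = s;

	while (*src) {
		if (src[0] == delim && src[1] == delim)
			src++;		/* drop one half of the doubled pair */
		*dst++ = *src++;
	}
	*dst = '\0';
	return (size_t)(dst - s);
}

int main(void)
{
	char pw[] = "se,,cret";

	unescape_delim(pw, ',');
	printf("%s\n", pw);	/* prints "se,cret" */
	return 0;
}
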
@@ -3441,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT   (LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT   (1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -3471,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
                wsize = min_t(unsigned int, wsize,
                                server->maxBuf - sizeof(WRITE_REQ) + 4);
 
+       /* limit to the amount that we can kmap at once */
+       wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
 
@@ -3491,18 +3510,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
         * MS-CIFS indicates that servers are only limited by the client's
         * bufsize for reads, testing against win98se shows that it throws
         * INVALID_PARAMETER errors if you try to request too large a read.
+        * OS/2 just sends back short reads.
         *
-        * If the server advertises a MaxBufferSize of less than one page,
-        * assume that it also can't satisfy reads larger than that either.
-        *
-        * FIXME: Is there a better heuristic for this?
+        * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+        * it can't handle a read request larger than its MaxBufferSize either.
         */
        if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
                defsize = CIFS_DEFAULT_IOSIZE;
        else if (server->capabilities & CAP_LARGE_READ_X)
                defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-       else if (server->maxBuf >= PAGE_CACHE_SIZE)
-               defsize = CIFSMaxBufSize;
        else
                defsize = server->maxBuf - sizeof(READ_RSP);
 
@@ -3515,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
        if (!(server->capabilities & CAP_LARGE_READ_X))
                rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
 
+       /* limit to the amount that we can kmap at once */
+       rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_RSIZE */
        rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
 
@@ -3938,7 +3957,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
                        NULL /*no tid */ , 4 /*wct */ );
 
-       smb_buffer->Mid = GetNextMid(ses->server);
+       smb_buffer->Mid = get_next_mid(ses->server);
        smb_buffer->Uid = ses->Suid;
        pSMB = (TCONX_REQ *) smb_buffer;
        pSMBr = (TCONX_RSP *) smb_buffer_response;
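
The wsize/rsize negotiation hunks above boil down to a chain of min_t() caps: start from the requested or default size, clamp by what the server advertises, by what the client can kmap at once (the new CIFS_KMAP_SIZE_LIMIT), and finally by the hard CIFS maximum. A toy, non-CIFS version of that clamping chain, with stand-in limits:

#include <stdio.h>

#define DEMO_KMAP_LIMIT	(1u << 24)	/* stand-in for CIFS_KMAP_SIZE_LIMIT */
#define DEMO_HARD_MAX	(1u << 20)	/* stand-in for CIFS_MAX_WSIZE */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int negotiate_size(unsigned int requested,
				   unsigned int server_max)
{
	unsigned int size = requested;

	size = min_u(size, server_max);		/* what the server advertises */
	size = min_u(size, DEMO_KMAP_LIMIT);	/* what we can kmap at once */
	size = min_u(size, DEMO_HARD_MAX);	/* hard implementation ceiling */
	return size;
}

int main(void)
{
	/* A 4 MiB request against a 16 MiB server limit ends up at 1 MiB. */
	printf("%u\n", negotiate_size(4u << 20, 16u << 20));
	return 0;
}

Because each cap is an independent min_t(), inserting the kmap limit does not disturb the existing server and protocol caps.
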
index 253170dfa71650704c8c2f6cdb69942635488404..513adbc211d7029d1c8b1d6670a1a37d9029cac6 100644 (file)
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-       unsigned int num, max_num;
+       unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
                return rc;
        }
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf) {
+               mutex_unlock(&cinode->lock_mutex);
+               FreeXid(xid);
+               return -EINVAL;
+       }
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
-       unsigned int max_num, num;
+       unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 
        INIT_LIST_HEAD(&tmp_llist);
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf)
+               return -EINVAL;
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
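
Both locking paths above now copy server->maxBuf into a local max_buf and bail out on zero before dividing by it, since cifs_reconnect() can reset maxBuf at any time. Reduced to its essentials (demo_lock_buf_entries is a made-up helper and assumes the fs/cifs headers for the types used):

/* Read the racy field once, validate the snapshot, then do arithmetic. */
static int demo_lock_buf_entries(struct TCP_Server_Info *server,
				 unsigned int *max_num)
{
	unsigned int max_buf = server->maxBuf;	/* single racy read */

	if (!max_buf)				/* reconnect zeroed it: give up */
		return -EINVAL;

	*max_num = (max_buf - sizeof(struct smb_hdr)) /
					sizeof(LOCKING_ANDX_RANGE);
	return 0;
}
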
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                continue;
                        if (types[i] != li->type)
                                continue;
-                       if (!cinode->can_cache_brlcks) {
-                               cur->Pid = cpu_to_le16(li->pid);
-                               cur->LengthLow = cpu_to_le32((u32)li->length);
-                               cur->LengthHigh =
-                                       cpu_to_le32((u32)(li->length>>32));
-                               cur->OffsetLow = cpu_to_le32((u32)li->offset);
-                               cur->OffsetHigh =
-                                       cpu_to_le32((u32)(li->offset>>32));
-                               /*
-                                * We need to save a lock here to let us add
-                                * it again to the file's list if the unlock
-                                * range request fails on the server.
-                                */
-                               list_move(&li->llist, &tmp_llist);
-                               if (++num == max_num) {
-                                       stored_rc = cifs_lockv(xid, tcon,
-                                                              cfile->netfid,
-                                                              li->type, num,
-                                                              0, buf);
-                                       if (stored_rc) {
-                                               /*
-                                                * We failed on the unlock range
-                                                * request - add all locks from
-                                                * the tmp list to the head of
-                                                * the file's list.
-                                                */
-                                               cifs_move_llist(&tmp_llist,
-                                                               &cfile->llist);
-                                               rc = stored_rc;
-                                       } else
-                                               /*
-                                                * The unlock range request
-                                                * succeed - free the tmp list.
-                                                */
-                                               cifs_free_llist(&tmp_llist);
-                                       cur = buf;
-                                       num = 0;
-                               } else
-                                       cur++;
-                       } else {
+                       if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
+                               continue;
                        }
+                       cur->Pid = cpu_to_le16(li->pid);
+                       cur->LengthLow = cpu_to_le32((u32)li->length);
+                       cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+                       cur->OffsetLow = cpu_to_le32((u32)li->offset);
+                       cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+                       /*
+                        * We need to save a lock here to let us add it again to
+                        * the file's list if the unlock range request fails on
+                        * the server.
+                        */
+                       list_move(&li->llist, &tmp_llist);
+                       if (++num == max_num) {
+                               stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+                                                      li->type, num, 0, buf);
+                               if (stored_rc) {
+                                       /*
+                                        * We failed on the unlock range
+                                        * request - add all locks from the tmp
+                                        * list to the head of the file's list.
+                                        */
+                                       cifs_move_llist(&tmp_llist,
+                                                       &cfile->llist);
+                                       rc = stored_rc;
+                               } else
+                                       /*
+                                        * The unlock range request succeed -
+                                        * The unlock range request succeeded -
+                                        */
+                                       cifs_free_llist(&tmp_llist);
+                               cur = buf;
+                               num = 0;
+                       } else
+                               cur++;
                }
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
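
The rewritten unlock loop above keeps the existing batch-and-flush shape: ranges accumulate in buf until max_num entries are queued, the batch is sent with cifs_lockv(), the buffer is reused, and any trailing partial batch is flushed after the loop. The control flow, stripped of the CIFS specifics (hypothetical names, plain userspace C):

#include <stdio.h>

#define MAX_BATCH 3

/* Stand-in for cifs_lockv(): "send" whatever has been accumulated. */
static void flush_batch(const int *batch, unsigned int num)
{
	printf("sending %u entries\n", num);
}

static void queue_ranges(const int *ranges, unsigned int count)
{
	int batch[MAX_BATCH];
	unsigned int i, num = 0;

	for (i = 0; i < count; i++) {
		batch[num] = ranges[i];
		if (++num == MAX_BATCH) {	/* batch full: flush, reuse buffer */
			flush_batch(batch, num);
			num = 0;
		}
	}
	if (num)				/* trailing partial batch */
		flush_batch(batch, num);
}

int main(void)
{
	int ranges[7] = { 0 };

	queue_ranges(ranges, 7);	/* sends batches of 3, 3, then 1 */
	return 0;
}
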
index e2552d2b2e42c551fde38d91e3ead83c6cfccd47..557506ae1e2a1c70c7edcb622b52d55fd0627dc7 100644 (file)
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
        return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-       __u64 mid = 0;
-       __u16 last_mid, cur_mid;
-       bool collision;
-
-       spin_lock(&GlobalMid_Lock);
-
-       /* mid is 16 bit only for CIFS/SMB */
-       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-       /* we do not want to loop forever */
-       last_mid = cur_mid;
-       cur_mid++;
-
-       /*
-        * This nested loop looks more expensive than it is.
-        * In practice the list of pending requests is short,
-        * fewer than 50, and the mids are likely to be unique
-        * on the first pass through the loop unless some request
-        * takes longer than the 64 thousand requests before it
-        * (and it would also have to have been a request that
-        * did not time out).
-        */
-       while (cur_mid != last_mid) {
-               struct mid_q_entry *mid_entry;
-               unsigned int num_mids;
-
-               collision = false;
-               if (cur_mid == 0)
-                       cur_mid++;
-
-               num_mids = 0;
-               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-                       ++num_mids;
-                       if (mid_entry->mid == cur_mid &&
-                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-                               /* This mid is in use, try a different one */
-                               collision = true;
-                               break;
-                       }
-               }
-
-               /*
-                * if we have more than 32k mids in the list, then something
-                * is very wrong. Possibly a local user is trying to DoS the
-                * box by issuing long-running calls and SIGKILL'ing them. If
-                * we get to 2^16 mids then we're in big trouble as this
-                * function could loop forever.
-                *
-                * Go ahead and assign out the mid in this situation, but force
-                * an eventual reconnect to clean out the pending_mid_q.
-                */
-               if (num_mids > 32768)
-                       server->tcpStatus = CifsNeedReconnect;
-
-               if (!collision) {
-                       mid = (__u64)cur_mid;
-                       server->CurrentMid = mid;
-                       break;
-               }
-               cur_mid++;
-       }
-       spin_unlock(&GlobalMid_Lock);
-       return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
    case it is responsbility of caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
-                       buffer->Mid = GetNextMid(treeCon->ses->server);
+                       buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
index 0a8224d1c4c5f2df8545f2c84f9e668feba2e0e9..a4217f02fab2860ced9f1bf444662cbf05461a4e 100644 (file)
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        dentry = d_lookup(parent, name);
        if (dentry) {
-               /* FIXME: check for inode number changes? */
-               if (dentry->d_inode != NULL)
+               inode = dentry->d_inode;
+               /* update inode in place if i_ino didn't change */
+               if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       cifs_fattr_to_inode(inode, fattr);
                        return dentry;
+               }
                d_drop(dentry);
                dput(dentry);
        }
index d9d615fbed3f5547ad62fadb0c811f339769cede..6dec38f5522d19198beebb23942d21dd850d0dde 100644 (file)
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
        return &server->credits;
 }
 
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+       __u64 mid = 0;
+       __u16 last_mid, cur_mid;
+       bool collision;
+
+       spin_lock(&GlobalMid_Lock);
+
+       /* mid is 16 bit only for CIFS/SMB */
+       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+       /* we do not want to loop forever */
+       last_mid = cur_mid;
+       cur_mid++;
+
+       /*
+        * This nested loop looks more expensive than it is.
+        * In practice the list of pending requests is short,
+        * fewer than 50, and the mids are likely to be unique
+        * on the first pass through the loop unless some request
+        * takes longer than the 64 thousand requests before it
+        * (and it would also have to have been a request that
+        * did not time out).
+        */
+       while (cur_mid != last_mid) {
+               struct mid_q_entry *mid_entry;
+               unsigned int num_mids;
+
+               collision = false;
+               if (cur_mid == 0)
+                       cur_mid++;
+
+               num_mids = 0;
+               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+                       ++num_mids;
+                       if (mid_entry->mid == cur_mid &&
+                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+                               /* This mid is in use, try a different one */
+                               collision = true;
+                               break;
+                       }
+               }
+
+               /*
+                * if we have more than 32k mids in the list, then something
+                * is very wrong. Possibly a local user is trying to DoS the
+                * box by issuing long-running calls and SIGKILL'ing them. If
+                * we get to 2^16 mids then we're in big trouble as this
+                * function could loop forever.
+                *
+                * Go ahead and assign out the mid in this situation, but force
+                * an eventual reconnect to clean out the pending_mid_q.
+                */
+               if (num_mids > 32768)
+                       server->tcpStatus = CifsNeedReconnect;
+
+               if (!collision) {
+                       mid = (__u64)cur_mid;
+                       server->CurrentMid = mid;
+                       break;
+               }
+               cur_mid++;
+       }
+       spin_unlock(&GlobalMid_Lock);
+       return mid;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
        .add_credits = cifs_add_credits,
        .set_credits = cifs_set_credits,
        .get_credits_field = cifs_get_credits_field,
+       .get_next_mid = cifs_get_next_mid,
        .read_data_offset = cifs_read_data_offset,
        .read_data_length = cifs_read_data_length,
        .map_error = map_smb_to_linux_error,
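
With the mid allocator moved into smb1ops.c and exposed through smb_version_operations, the get_next_mid() used by the earlier hunks is presumably a thin dispatch through the per-dialect table; the matching cifsglob.h change is not part of these hunks, so the wrapper below is only an assumption about its shape:

/* Assumed wrapper; the real definition lives in fs/cifs/cifsglob.h. */
static inline __u64
get_next_mid(struct TCP_Server_Info *server)
{
	return server->ops->get_next_mid(server);
}
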
index 1b36ffe6a47b5f6888519fea797aebdbe3a9da97..f25d4ea14be4b7d751e69817eca9bad2d6a14935 100644 (file)
@@ -365,16 +365,14 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
        if (mid == NULL)
                return -ENOMEM;
 
-       /* put it on the pending_mid_q */
-       spin_lock(&GlobalMid_Lock);
-       list_add_tail(&mid->qhead, &server->pending_mid_q);
-       spin_unlock(&GlobalMid_Lock);
-
        rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-       if (rc)
-               delete_mid(mid);
+       if (rc) {
+               DeleteMidQEntry(mid);
+               return rc;
+       }
+
        *ret_mid = mid;
-       return rc;
+       return 0;
 }
 
 /*
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
        mid->callback_data = cbdata;
        mid->mid_state = MID_REQUEST_SUBMITTED;
 
+       /* put it on the pending_mid_q */
+       spin_lock(&GlobalMid_Lock);
+       list_add_tail(&mid->qhead, &server->pending_mid_q);
+       spin_unlock(&GlobalMid_Lock);
+
+
        cifs_in_send_inc(server);
        rc = smb_sendv(server, iov, nvec);
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
        mutex_unlock(&server->srv_mutex);
 
-       if (rc)
-               goto out_err;
+       if (rc == 0)
+               return 0;
 
-       return rc;
-out_err:
        delete_mid(mid);
        add_credits(server, 1);
        wake_up(&server->request_q);
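
The two transport.c hunks above reorder async submission: the request is signed first, and the mid is added to pending_mid_q only once it is fully set up and about to be sent, so a signing failure never leaves a partial entry visible to the demultiplex thread. In outline (demo_submit_async is a made-up helper; srv_mutex handling, credit accounting and send-failure cleanup are omitted):

static int demo_submit_async(struct TCP_Server_Info *server, struct kvec *iov,
			     unsigned int nvec, struct mid_q_entry *mid)
{
	int rc;

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc)
		return rc;	/* nothing was queued, nothing to unwind */

	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);	/* visible only once fully initialized */

	return smb_sendv(server, iov, nvec);
}
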
@@ -779,7 +781,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
index 0781e619a62a48babf8969fa5453994d784ba13c..6161255fac45648efdfe437d9d880d390268d14f 100644 (file)
@@ -532,7 +532,7 @@ out:
 ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector, unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
-               struct iovec **ret_pointer, int check_access)
+               struct iovec **ret_pointer)
 {
        compat_ssize_t tot_len;
        struct iovec *iov = *ret_pointer = fast_pointer;
@@ -579,7 +579,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
                }
                if (len < 0)    /* size_t not fitting in compat_ssize_t .. */
                        goto out;
-               if (check_access &&
+               if (type >= 0 &&
                    !access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
                        ret = -EFAULT;
                        goto out;
@@ -871,12 +871,12 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
 {
        int error;
        struct file *file;
+       int fput_needed;
        struct compat_readdir_callback buf;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
@@ -885,8 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
        if (buf.result)
                error = buf.result;
 
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -953,16 +952,15 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
        struct file * file;
        struct compat_linux_dirent __user * lastdirent;
        struct compat_getdents_callback buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -979,8 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -1041,16 +1038,15 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        struct file * file;
        struct linux_dirent64 __user * lastdirent;
        struct compat_getdents_callback64 buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -1068,8 +1064,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 #endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
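
The three compat getdents paths above move from fget()/fput() to fget_light()/fput_light(), which can skip the atomic reference-count bump when the file table is not shared. The pairing, in a hedged sketch of a hypothetical handler:

#include <linux/file.h>
#include <linux/fs.h>

/* Sketch only: shows the fget_light()/fput_light() pairing, not a real syscall. */
static long demo_fd_operation(unsigned int fd)
{
	struct file *file;
	int fput_needed;
	long error = 0;

	file = fget_light(fd, &fput_needed);	/* cheap lookup when possible */
	if (!file)
		return -EBADF;

	/* ... operate on *file ... */

	fput_light(file, fput_needed);	/* drops the ref only if one was taken */
	return error;
}
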
@@ -1094,7 +1089,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
                goto out;
 
        tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
-                                              UIO_FASTIOV, iovstack, &iov, 1);
+                                              UIO_FASTIOV, iovstack, &iov);
        if (tot_len == 0) {
                ret = 0;
                goto out;
@@ -1547,7 +1542,6 @@ asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg)
                                 compat_ptr(a.exp), compat_ptr(a.tvp));
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_compat_pselect(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
@@ -1670,11 +1664,9 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
 
        return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef CONFIG_EPOLL
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long compat_sys_epoll_pwait(int epfd,
                        struct compat_epoll_event __user *events,
                        int maxevents, int timeout,
@@ -1718,7 +1710,6 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
 
        return err;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #endif /* CONFIG_EPOLL */
 
index 4435d8b329044da3b48c83dfe555409464797d0d..40469044088def2607d19540ef0cb8dcce78ac91 100644 (file)
@@ -2575,7 +2575,7 @@ static int prepend_path(const struct path *path,
        bool slash = false;
        int error = 0;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -2606,7 +2606,7 @@ static int prepend_path(const struct path *path,
                error = prepend(buffer, buflen, "/", 1);
 
 out:
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return error;
 
 global_root:
index f4aadd15b61376ee97a4a7bababe2eb659850820..0c85fae37666db4b18fb2bf9ebf692ae04bcbf40 100644 (file)
@@ -145,50 +145,6 @@ struct dio {
 
 static struct kmem_cache *dio_cache __read_mostly;
 
-static void __inode_dio_wait(struct inode *inode)
-{
-       wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
-       DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
-       do {
-               prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
-               if (atomic_read(&inode->i_dio_count))
-                       schedule();
-       } while (atomic_read(&inode->i_dio_count));
-       finish_wait(wq, &q.wait);
-}
-
-/**
- * inode_dio_wait - wait for outstanding DIO requests to finish
- * @inode: inode to wait for
- *
- * Waits for all pending direct I/O requests to finish so that we can
- * proceed with a truncate or equivalent operation.
- *
- * Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
- */
-void inode_dio_wait(struct inode *inode)
-{
-       if (atomic_read(&inode->i_dio_count))
-               __inode_dio_wait(inode);
-}
-EXPORT_SYMBOL(inode_dio_wait);
-
-/*
- * inode_dio_done - signal finish of a direct I/O requests
- * @inode: inode the direct I/O happens on
- *
- * This is called once we've finished processing a direct I/O request,
- * and is used to wake up callers waiting for direct I/O to be quiesced.
- */
-void inode_dio_done(struct inode *inode)
-{
-       if (atomic_dec_and_test(&inode->i_dio_count))
-               wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
-}
-EXPORT_SYMBOL(inode_dio_done);
-
 /*
  * How many pages are in the queue?
  */
index ab35b113003b900ad3592217d64e6cef82fe8f9d..a07441a0a8789a9ee1e43f5be0d2b43ec3ee04e8 100644 (file)
@@ -660,11 +660,10 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
 {
        struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
        char *lower_buf;
-       size_t lower_bufsiz = PATH_MAX;
        mm_segment_t old_fs;
        int rc;
 
-       lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
+       lower_buf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!lower_buf) {
                rc = -ENOMEM;
                goto out;
@@ -673,58 +672,29 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
        set_fs(get_ds());
        rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
                                                   (char __user *)lower_buf,
-                                                  lower_bufsiz);
+                                                  PATH_MAX);
        set_fs(old_fs);
        if (rc < 0)
                goto out;
-       lower_bufsiz = rc;
        rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
-                                                 lower_buf, lower_bufsiz);
+                                                 lower_buf, rc);
 out:
        kfree(lower_buf);
        return rc;
 }
 
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-       char *kbuf;
-       size_t kbufsiz, copied;
+       char *buf;
+       size_t len = PATH_MAX;
        int rc;
 
-       rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+       rc = ecryptfs_readlink_lower(dentry, &buf, &len);
        if (rc)
                goto out;
-       copied = min_t(size_t, bufsiz, kbufsiz);
-       rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
-       kfree(kbuf);
        fsstack_copy_attr_atime(dentry->d_inode,
                                ecryptfs_dentry_to_lower(dentry)->d_inode);
-out:
-       return rc;
-}
-
-static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       char *buf;
-       int len = PAGE_SIZE, rc;
-       mm_segment_t old_fs;
-
-       /* Released in ecryptfs_put_link(); only release here on error */
-       buf = kmalloc(len, GFP_KERNEL);
-       if (!buf) {
-               buf = ERR_PTR(-ENOMEM);
-               goto out;
-       }
-       old_fs = get_fs();
-       set_fs(get_ds());
-       rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
-       set_fs(old_fs);
-       if (rc < 0) {
-               kfree(buf);
-               buf = ERR_PTR(rc);
-       } else
-               buf[rc] = '\0';
+       buf[len] = '\0';
 out:
        nd_set_link(nd, buf);
        return NULL;
@@ -1153,7 +1123,7 @@ out:
 }
 
 const struct inode_operations ecryptfs_symlink_iops = {
-       .readlink = ecryptfs_readlink,
+       .readlink = generic_readlink,
        .follow_link = ecryptfs_follow_link,
        .put_link = ecryptfs_put_link,
        .permission = ecryptfs_permission,
index 69f994a7d5249589bf594adaa4780bca967f08b2..0dbe58a8b172c1de225b3457600b50d644b1da35 100644 (file)
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
        if (!IS_ERR(*lower_file))
                goto out;
-       if (flags & O_RDONLY) {
+       if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
        }
index 3a06f4043df42a811add69fd7ede7b582a94c347..c0038f6566d4df1493d9321821a53adb62b902ab 100644 (file)
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
                goto out_unlock_daemon;
        }
        daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       file->private_data = daemon;
        atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
        mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon)
+               daemon = file->private_data;
        mutex_lock(&daemon->mux);
-       BUG_ON(daemon->pid != task_pid(current));
        BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
        daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
        atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
                          struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
                          u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-       int rc = 0;
+       struct ecryptfs_message *msg;
 
-       mutex_lock(&msg_ctx->mux);
-       msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-                              GFP_KERNEL);
-       if (!msg_ctx->msg) {
-               rc = -ENOMEM;
+       msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+       if (!msg) {
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-                      (sizeof(*msg_ctx->msg) + data_size));
-               goto out_unlock;
+                      (sizeof(*msg) + data_size));
+               return -ENOMEM;
        }
+
+       mutex_lock(&msg_ctx->mux);
+       msg_ctx->msg = msg;
        msg_ctx->msg->index = msg_ctx->index;
        msg_ctx->msg->data_len = data_size;
        msg_ctx->type = msg_type;
        memcpy(msg_ctx->msg->data, data, data_size);
        msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-       mutex_lock(&daemon->mux);
        list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       mutex_unlock(&msg_ctx->mux);
+
+       mutex_lock(&daemon->mux);
        daemon->num_queued_msg_ctx++;
        wake_up_interruptible(&daemon->wait);
        mutex_unlock(&daemon->mux);
-out_unlock:
-       mutex_unlock(&msg_ctx->mux);
-       return rc;
+
+       return 0;
 }
 
 /*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
+       if (task_pid(current) != daemon->pid) {
+               mutex_unlock(&daemon->mux);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EPERM;
+       }
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
                rc = 0;
                mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
                 * message from the queue; try again */
                goto check_list;
        }
-       BUG_ON(euid != daemon->euid);
-       BUG_ON(current_user_ns() != daemon->user_ns);
-       BUG_ON(task_pid(current) != daemon->pid);
        msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
                                   struct ecryptfs_msg_ctx, daemon_out_list);
        BUG_ON(!msg_ctx);
index dba15fecf23e376f12ad2cb367424cf202d3a819..d81b9f654086d1cdb3899767cf6e1e5bf05e9f6e 100644 (file)
@@ -46,20 +46,16 @@ struct eventfd_ctx {
  * value, and we signal this as overflow condition by returining a POLLERR
  * to poll(2).
  *
- * Returns @n in case of success, a non-negative number lower than @n in case
- * of overflow, or the following error codes:
- *
- * -EINVAL    : The value of @n is negative.
+ * Returns the amount by which the counter was incremented.  This will be less
+ * than @n if the counter has overflowed.
  */
-int eventfd_signal(struct eventfd_ctx *ctx, int n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
        unsigned long flags;
 
-       if (n < 0)
-               return -EINVAL;
        spin_lock_irqsave(&ctx->wqh.lock, flags);
        if (ULLONG_MAX - ctx->count < n)
-               n = (int) (ULLONG_MAX - ctx->count);
+               n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, POLLIN);
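
eventfd_signal() now takes and returns __u64 and clamps the increment so the counter saturates at ULLONG_MAX (the overflow state reported via POLLERR) instead of wrapping or rejecting the call. The clamping arithmetic on its own, as a runnable toy:

#include <stdio.h>

typedef unsigned long long u64;
#define DEMO_U64_MAX 0xffffffffffffffffULL

/*
 * Add n to *count, clamping so the counter saturates at DEMO_U64_MAX
 * (the overflow marker) instead of wrapping; return how much was added.
 */
static u64 saturating_signal(u64 *count, u64 n)
{
	if (DEMO_U64_MAX - *count < n)
		n = DEMO_U64_MAX - *count;
	*count += n;
	return n;
}

int main(void)
{
	u64 count = DEMO_U64_MAX - 3;

	printf("added %llu\n", saturating_signal(&count, 10));	/* added 3 */
	printf("saturated: %d\n", count == DEMO_U64_MAX);	/* saturated: 1 */
	return 0;
}
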
index 079d1be65ba9e61e6f0452ecffc0cd23a6c3987d..1c8b55670804c20e88a10a48668f574d54f58205 100644 (file)
@@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
+       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
                epds.events &= ~EPOLLWAKEUP;
 
        /*
@@ -1853,8 +1853,6 @@ error_return:
        return error;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
-
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_pwait(2).
@@ -1899,8 +1897,6 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
        return error;
 }
 
-#endif /* HAVE_SET_RESTORE_SIGMASK */
-
 static int __init eventpoll_init(void)
 {
        struct sysinfo si;
index 52c9e2ff6e6bd8b6f763e56ceafda431731cea9b..da27b91ff1e8cbe87d0fe42aa5d39513e6a9deeb 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -280,10 +280,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
-       err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (err)
-               goto err;
-
        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;
@@ -823,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm)
        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
-       sync_mm_rss(old_mm);
        mm_release(tsk, old_mm);
 
        if (old_mm) {
+               sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
index 49cf230554a21d33785d16367d9397ab34a44d91..24a49d47e9354c00f0ebd2da6c92d3520e71332c 100644 (file)
@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
 out:
        ios->numdevs = devs_in_group;
        ios->pages_consumed = cur_pg;
-       if (unlikely(ret)) {
-               if (length == ios->length)
-                       return ret;
-               else
-                       ios->length -= length;
-       }
-       return 0;
+       return ret;
 }
 
 int ore_create(struct ore_io_state *ios)
index d222c77cfa1ba0669ca7580c420a1c31da35c86a..5f376d14fdcc3c0d9791fce5f2951d35b54c778b 100644 (file)
@@ -144,26 +144,26 @@ static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
 {
        unsigned data_devs = sp2d->data_devs;
        unsigned group_width = data_devs + sp2d->parity;
-       unsigned p;
+       int p, c;
 
        if (!sp2d->needed)
                return;
 
-       for (p = 0; p < sp2d->pages_in_unit; p++) {
-               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
-               if (_1ps->write_count < group_width) {
-                       unsigned c;
+       for (c = data_devs - 1; c >= 0; --c)
+               for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
+                       struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
-                       for (c = 0; c < data_devs; c++)
-                               if (_1ps->page_is_read[c]) {
-                                       struct page *page = _1ps->pages[c];
+                       if (_1ps->page_is_read[c]) {
+                               struct page *page = _1ps->pages[c];
 
-                                       r4w->put_page(priv, page);
-                                       _1ps->page_is_read[c] = false;
-                               }
+                               r4w->put_page(priv, page);
+                               _1ps->page_is_read[c] = false;
+                       }
                }
 
+       for (p = 0; p < sp2d->pages_in_unit; p++) {
+               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
                memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
                _1ps->write_count = 0;
                _1ps->tx = NULL;
@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
  * ios->sp2d[p][*], xor is calculated the same way. These pages are
  * allocated/freed and don't go through cache
  */
-static int _read_4_write(struct ore_io_state *ios)
+static int _read_4_write_first_stripe(struct ore_io_state *ios)
 {
-       struct ore_io_state *ios_read;
        struct ore_striping_info read_si;
        struct __stripe_pages_2d *sp2d = ios->sp2d;
        u64 offset = ios->si.first_stripe_start;
-       u64 last_stripe_end;
-       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
-       unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-       int ret;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
 
        if (offset == ios->offset) /* Go to start collect $200 */
                goto read_last_stripe;
@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
        min_p = _sp2d_min_pg(sp2d);
        max_p = _sp2d_max_pg(sp2d);
 
+       ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
+                  offset, ios->offset, min_p, max_p);
+
        for (c = 0; ; c++) {
                ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
                read_si.obj_offset += min_p * PAGE_SIZE;
@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
        }
 
 read_last_stripe:
+       return 0;
+}
+
+static int _read_4_write_last_stripe(struct ore_io_state *ios)
+{
+       struct ore_striping_info read_si;
+       struct __stripe_pages_2d *sp2d = ios->sp2d;
+       u64 offset;
+       u64 last_stripe_end;
+       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
        offset = ios->offset + ios->length;
        if (offset % PAGE_SIZE)
                _add_to_r4w_last_page(ios, &offset);
@@ -527,15 +538,15 @@ read_last_stripe:
        c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
                       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
 
-       BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
-       /* unaligned IO must be within a single stripe */
-
        if (min_p == sp2d->pages_in_unit) {
                /* Didn't do it yet */
                min_p = _sp2d_min_pg(sp2d);
                max_p = _sp2d_max_pg(sp2d);
        }
 
+       ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
+                  offset, last_stripe_end, min_p, max_p);
+
        while (offset < last_stripe_end) {
                struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
@@ -568,6 +579,15 @@ read_last_stripe:
        }
 
 read_it:
+       return 0;
+}
+
+static int _read_4_write_execute(struct ore_io_state *ios)
+{
+       struct ore_io_state *ios_read;
+       unsigned i;
+       int ret;
+
        ios_read = ios->ios_read_4_write;
        if (!ios_read)
                return 0;
@@ -591,6 +611,8 @@ read_it:
        }
 
        _mark_read4write_pages_uptodate(ios_read, ret);
+       ore_put_io_state(ios_read);
+       ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
        return 0;
 }
 
@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
                        /* If first stripe, Read in all read4write pages
                         * (if needed) before we calculate the first parity.
                         */
-                       _read_4_write(ios);
+                       _read_4_write_first_stripe(ios);
                }
+               if (!cur_len) /* If last stripe r4w pages of last stripe */
+                       _read_4_write_last_stripe(ios);
+               _read_4_write_execute(ios);
 
                for (i = 0; i < num_pages; i++) {
                        pages[i] = _raid_page_alloc();
@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
 
 int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
 {
-       struct ore_layout *layout = ios->layout;
-
        if (ios->parity_pages) {
+               struct ore_layout *layout = ios->layout;
                unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
-               unsigned stripe_size = ios->si.bytes_in_stripe;
-               u64 last_stripe, first_stripe;
 
                if (_sp2d_alloc(pages_in_unit, layout->group_width,
                                layout->parity, &ios->sp2d)) {
                        return -ENOMEM;
                }
-
-               /* Round io down to last full strip */
-               first_stripe = div_u64(ios->offset, stripe_size);
-               last_stripe = div_u64(ios->offset + ios->length, stripe_size);
-
-               /* If an IO spans more then a single stripe it must end at
-                * a stripe boundary. The reminder at the end is pushed into the
-                * next IO.
-                */
-               if (last_stripe != first_stripe) {
-                       ios->length = last_stripe * stripe_size - ios->offset;
-
-                       BUG_ON(!ios->length);
-                       ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
-                                       PAGE_SIZE;
-                       ios->si.length = ios->length; /*make it consistent */
-               }
        }
        return 0;
 }
index e32bc919e4e3413d6a8063ac3d0bfb466c959f22..5a7b691e748bdcda66746098d42194cb4682f7e5 100644 (file)
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = {
 static struct kobj_type uuid_ktype = {
 };
 
-void exofs_sysfs_dbg_print()
+void exofs_sysfs_dbg_print(void)
 {
 #ifdef CONFIG_EXOFS_DEBUG
        struct kobject *k_name, *k_tmp;
index b05acb7961355dfb680e49f3145a11065f6ac851..b0201ca6e9c6e0b7837917420bb3dfe1dc06b88f 100644 (file)
@@ -304,24 +304,23 @@ out:
 
 /**
  * export_encode_fh - default export_operations->encode_fh function
- * @dentry:  the dentry to encode
+ * @inode:   the object to encode
  * @fh:      where to store the file handle fragment
  * @max_len: maximum length to store there
- * @connectable: whether to store parent information
+ * @parent:  parent directory inode, if wanted
  *
  * This default encode_fh function assumes that the 32 inode number
  * is suitable for locating an inode, and that the generation number
  * can be used to check that it is still valid.  It places them in the
  * filehandle fragment where export_decode_fh expects to find them.
  */
-static int export_encode_fh(struct dentry *dentry, struct fid *fid,
-               int *max_len, int connectable)
+static int export_encode_fh(struct inode *inode, struct fid *fid,
+               int *max_len, struct inode *parent)
 {
-       struct inode * inode = dentry->d_inode;
        int len = *max_len;
        int type = FILEID_INO32_GEN;
 
-       if (connectable && (len < 4)) {
+       if (parent && (len < 4)) {
                *max_len = 4;
                return 255;
        } else if (len < 2) {
@@ -332,14 +331,9 @@ static int export_encode_fh(struct dentry *dentry, struct fid *fid,
        len = 2;
        fid->i32.ino = inode->i_ino;
        fid->i32.gen = inode->i_generation;
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                fid->i32.parent_ino = parent->i_ino;
                fid->i32.parent_gen = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
                len = 4;
                type = FILEID_INO32_GEN_PARENT;
        }
@@ -352,11 +346,22 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
 {
        const struct export_operations *nop = dentry->d_sb->s_export_op;
        int error;
+       struct dentry *p = NULL;
+       struct inode *inode = dentry->d_inode, *parent = NULL;
 
+       if (connectable && !S_ISDIR(inode->i_mode)) {
+               p = dget_parent(dentry);
+               /*
+                * note that while p might've ceased to be our parent already,
+                * it's still pinned by and still positive.
+                */
+               parent = p->d_inode;
+       }
        if (nop->encode_fh)
-               error = nop->encode_fh(dentry, fid->raw, max_len, connectable);
+               error = nop->encode_fh(inode, fid->raw, max_len, parent);
        else
-               error = export_encode_fh(dentry, fid, max_len, connectable);
+               error = export_encode_fh(inode, fid, max_len, parent);
+       dput(p);
 
        return error;
 }
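
The exportfs change above hoists the dget_parent() handling into exportfs_encode_fh() and switches the callback from (dentry, connectable) to (inode, parent). A hedged sketch of a filesystem callback under the new convention, modeled on the default export_encode_fh() in the hunk (demofs is made up):

#include <linux/exportfs.h>
#include <linux/fs.h>

/* The core passes the inode plus, for connectable non-directory handles,
 * the already-pinned parent inode. */
static int demofs_encode_fh(struct inode *inode, struct fid *fid,
			    int *max_len, struct inode *parent)
{
	int len = *max_len;

	if (len < (parent ? 4 : 2)) {
		*max_len = parent ? 4 : 2;	/* tell the caller what is needed */
		return 255;
	}

	fid->i32.ino = inode->i_ino;
	fid->i32.gen = inode->i_generation;
	*max_len = 2;
	if (!parent)
		return FILEID_INO32_GEN;

	fid->i32.parent_ino = parent->i_ino;
	fid->i32.parent_gen = parent->i_generation;
	*max_len = 4;
	return FILEID_INO32_GEN_PARENT;
}
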
index 9ed1bb1f319f381b700a6386a4d8d068d04e0fdf..c22f17021b6eee7ca942a3525eb9f4fd23de6011 100644 (file)
@@ -2,6 +2,8 @@ config EXT4_FS
        tristate "The Extended 4 (ext4) filesystem"
        select JBD2
        select CRC16
+       select CRYPTO
+       select CRYPTO_CRC32C
        help
          This is the next generation of the ext3 filesystem.
 
index c45c41129a35b7346463e0f18e847b78a52e0426..cee7812cc3cf9a17f8dc967eb7af480c079c5f44 100644 (file)
@@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
-               block_cluster = EXT4_B2C(sbi, (start -
-                                              ext4_block_bitmap(sb, gdp)));
+               block_cluster = EXT4_B2C(sbi,
+                                        ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
@@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
 
        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
-                                        start - ext4_inode_bitmap(sb, gdp));
+                                        ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
@@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
-                       c = EXT4_B2C(sbi, start - itbl_blk + i);
+                       c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
@@ -168,12 +168,14 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 
        /* If checksum is bad mark all blocks used to prevent allocation
         * essentially implementing a per-group read-only flag. */
-       if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+       if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_group_clusters_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
+               ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+                                          EXT4_BLOCKS_PER_GROUP(sb) / 8);
                return;
        }
        memset(bh->b_data, 0, sb->s_blocksize);
@@ -210,6 +212,9 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
+       ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
 }
 
 /* Return the number of free blocks in a block group.  It is used when
@@ -276,9 +281,9 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 }
 
 static int ext4_valid_block_bitmap(struct super_block *sb,
-                                       struct ext4_group_desc *desc,
-                                       unsigned int block_group,
-                                       struct buffer_head *bh)
+                                  struct ext4_group_desc *desc,
+                                  unsigned int block_group,
+                                  struct buffer_head *bh)
 {
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
@@ -325,6 +330,23 @@ err_out:
                        block_group, bitmap_blk);
        return 0;
 }
+
+void ext4_validate_block_bitmap(struct super_block *sb,
+                              struct ext4_group_desc *desc,
+                              unsigned int block_group,
+                              struct buffer_head *bh)
+{
+       if (buffer_verified(bh))
+               return;
+
+       ext4_lock_group(sb, block_group);
+       if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
+           ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
+                                         EXT4_BLOCKS_PER_GROUP(sb) / 8))
+               set_buffer_verified(bh);
+       ext4_unlock_group(sb, block_group);
+}
+
 /**
  * ext4_read_block_bitmap()
  * @sb:                        super block
@@ -355,12 +377,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
        }
 
        if (bitmap_uptodate(bh))
-               return bh;
+               goto verify;
 
        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -379,7 +401,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        /*
         * submit the buffer_head for reading
@@ -390,6 +412,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
        get_bh(bh);
        submit_bh(READ, bh);
        return bh;
+verify:
+       ext4_validate_block_bitmap(sb, desc, block_group, bh);
+       return bh;
 }
 
 /* Returns 0 on success, 1 on error */
@@ -412,7 +437,7 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
-       ext4_valid_block_bitmap(sb, desc, block_group, bh);
+       ext4_validate_block_bitmap(sb, desc, block_group, bh);
        return 0;
 }
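
The balloc.c hunks above funnel every "bitmap already up to date" exit through a verify: label, and the new ext4_validate_block_bitmap() checks the bitmap layout and its crc32c under the group lock, then marks the buffer verified so the expensive check runs at most once per buffer. A rough standalone sketch of that check-once idiom follows; all names here are hypothetical, not kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical stand-in for a cached metadata buffer */
    struct mock_buffer {
        bool verified;              /* analogous to buffer_verified(bh) */
        const char *data;
    };

    static bool expensive_check(const struct mock_buffer *b)
    {
        printf("running full validation of %s\n", b->data);
        return true;                /* pretend layout and checksum are fine */
    }

    /* analogous to ext4_validate_block_bitmap(): validate once, then remember */
    static void validate(struct mock_buffer *b)
    {
        if (b->verified)
            return;                 /* fast path: already checked */
        /* the real code holds ext4_lock_group() around this section */
        if (expensive_check(b))
            b->verified = true;
    }

    int main(void)
    {
        struct mock_buffer b = { false, "block bitmap" };

        validate(&b);               /* runs the full check */
        validate(&b);               /* no-op: the result is cached on the buffer */
        return 0;
    }
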
 
index fa3af81ac565c16dba6237edc89c3c7d70c5fc61..b319721da26ae32010adcd46db7e2d98ec50887a 100644 (file)
@@ -29,3 +29,86 @@ unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
 
 #endif  /*  EXT4FS_DEBUG  */
 
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz)
+{
+       __u32 hi;
+       __u32 provided, calculated;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+       calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
+               hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
+               provided |= (hi << 16);
+       } else
+               calculated &= 0xFFFF;
+
+       return provided == calculated;
+}
+
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz)
+{
+       __u32 csum;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
+               gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
+
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz)
+{
+       __u32 hi;
+       __u32 provided, calculated;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+       calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
+               hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
+               provided |= (hi << 16);
+       } else
+               calculated &= 0xFFFF;
+
+       if (provided == calculated)
+               return 1;
+
+       ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
+       return 0;
+}
+
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz)
+{
+       __u32 csum;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
+               gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
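
The bitmap checksum helpers added above keep the full crc32c only when the group descriptor is large enough to hold the *_csum_hi field (s_desc_size >= EXT4_BG_*_CSUM_HI_END); otherwise only the low 16 bits are stored, and the freshly computed value is masked before comparison. A small standalone illustration of that split-and-verify logic; the checksum value is made up, and only the lo/hi handling mirrors the diff.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_desc {
        uint16_t csum_lo;   /* always present */
        uint16_t csum_hi;   /* only meaningful on 64-bit descriptors */
    };

    static void csum_set(struct mock_desc *d, uint32_t csum, bool has_hi)
    {
        d->csum_lo = (uint16_t)(csum & 0xFFFF);
        if (has_hi)
            d->csum_hi = (uint16_t)(csum >> 16);
    }

    static bool csum_verify(const struct mock_desc *d, uint32_t calculated, bool has_hi)
    {
        uint32_t provided = d->csum_lo;

        if (has_hi)
            provided |= (uint32_t)d->csum_hi << 16;
        else
            calculated &= 0xFFFF;   /* only 16 bits were stored */

        return provided == calculated;
    }

    int main(void)
    {
        struct mock_desc d = { 0, 0 };
        uint32_t csum = 0xDEADBEEF;         /* stand-in for a crc32c result */

        csum_set(&d, csum, false);
        printf("small descriptor verifies: %d\n", csum_verify(&d, csum, false));
        csum_set(&d, csum, true);
        printf("64-bit descriptor verifies: %d\n", csum_verify(&d, csum, true));
        return 0;
    }
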
index b86786202643bdd8044ee85fb72a0a21bc2c9bef..aa39e600d15954244aead38f7aed30513ce86d65 100644 (file)
@@ -179,6 +179,18 @@ static int ext4_readdir(struct file *filp,
                        continue;
                }
 
+               /* Check the checksum */
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(inode,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
+                                       "at offset %llu",
+                                       (unsigned long long)filp->f_pos);
+                       filp->f_pos += sb->s_blocksize - offset;
+                       continue;
+               }
+               set_buffer_verified(bh);
+
 revalidate:
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
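
On a directory block whose checksum fails, the readdir hunk above advances f_pos by sb->s_blocksize - offset, i.e. it skips straight to the next block boundary rather than parsing entries out of a corrupt block. A minimal arithmetic check of that skip, assuming a 4096-byte block size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long blocksize = 4096;
        unsigned long long f_pos = 3 * blocksize + 1300;     /* somewhere in block 3 */
        unsigned long long offset = f_pos & (blocksize - 1); /* 1300 */

        f_pos += blocksize - offset;                         /* skip the bad block */
        printf("new f_pos = %llu (block %llu, offset %llu)\n",
               f_pos, f_pos / blocksize, f_pos % blocksize);
        return 0;
    }
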
index c21b1de51afbb42191adea4fc4a357e3906c8489..cfc4e01b3c8370c642681824ef55b13a66683c0d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
 #endif
@@ -298,7 +299,9 @@ struct ext4_group_desc
        __le16  bg_free_inodes_count_lo;/* Free inodes count */
        __le16  bg_used_dirs_count_lo;  /* Directories count */
        __le16  bg_flags;               /* EXT4_BG_flags (INODE_UNINIT, etc) */
-       __u32   bg_reserved[2];         /* Likely block/inode bitmap checksum */
+       __le32  bg_exclude_bitmap_lo;   /* Exclude bitmap for snapshots */
+       __le16  bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */
+       __le16  bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */
        __le16  bg_itable_unused_lo;    /* Unused inodes count */
        __le16  bg_checksum;            /* crc16(sb_uuid+group+desc) */
        __le32  bg_block_bitmap_hi;     /* Blocks bitmap block MSB */
@@ -308,9 +311,19 @@ struct ext4_group_desc
        __le16  bg_free_inodes_count_hi;/* Free inodes count MSB */
        __le16  bg_used_dirs_count_hi;  /* Directories count MSB */
        __le16  bg_itable_unused_hi;    /* Unused inodes count MSB */
-       __u32   bg_reserved2[3];
+       __le32  bg_exclude_bitmap_hi;   /* Exclude bitmap block MSB */
+       __le16  bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */
+       __le16  bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */
+       __u32   bg_reserved;
 };
 
+#define EXT4_BG_INODE_BITMAP_CSUM_HI_END       \
+       (offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \
+        sizeof(__le16))
+#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END       \
+       (offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \
+        sizeof(__le16))
+
 /*
  * Structure of a flex block group info
  */
@@ -650,7 +663,8 @@ struct ext4_inode {
                        __le16  l_i_file_acl_high;
                        __le16  l_i_uid_high;   /* these 2 fields */
                        __le16  l_i_gid_high;   /* were reserved2[0] */
-                       __u32   l_i_reserved2;
+                       __le16  l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
+                       __le16  l_i_reserved;
                } linux2;
                struct {
                        __le16  h_i_reserved1;  /* Obsoleted fragment number/size which are removed in ext4 */
@@ -666,7 +680,7 @@ struct ext4_inode {
                } masix2;
        } osd2;                         /* OS dependent 2 */
        __le16  i_extra_isize;
-       __le16  i_pad1;
+       __le16  i_checksum_hi;  /* crc32c(uuid+inum+inode) BE */
        __le32  i_ctime_extra;  /* extra Change time      (nsec << 2 | epoch) */
        __le32  i_mtime_extra;  /* extra Modification time(nsec << 2 | epoch) */
        __le32  i_atime_extra;  /* extra Access time      (nsec << 2 | epoch) */
@@ -768,7 +782,7 @@ do {                                                                               \
 #define i_gid_low      i_gid
 #define i_uid_high     osd2.linux2.l_i_uid_high
 #define i_gid_high     osd2.linux2.l_i_gid_high
-#define i_reserved2    osd2.linux2.l_i_reserved2
+#define i_checksum_lo  osd2.linux2.l_i_checksum_lo
 
 #elif defined(__GNU__)
 
@@ -908,6 +922,9 @@ struct ext4_inode_info {
         */
        tid_t i_sync_tid;
        tid_t i_datasync_tid;
+
+       /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+       __u32 i_csum_seed;
 };
 
 /*
@@ -1001,6 +1018,9 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 #define EXT4_ERRORS_PANIC              3       /* Panic */
 #define EXT4_ERRORS_DEFAULT            EXT4_ERRORS_CONTINUE
 
+/* Metadata checksum algorithm codes */
+#define EXT4_CRC32C_CHKSUM             1
+
 /*
  * Structure of the super block
  */
@@ -1087,7 +1107,7 @@ struct ext4_super_block {
        __le64  s_mmp_block;            /* Block for multi-mount protection */
        __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
-       __u8    s_reserved_char_pad;
+       __u8    s_checksum_type;        /* metadata checksum algorithm used */
        __le16  s_reserved_pad;
        __le64  s_kbytes_written;       /* nr of lifetime kilobytes written */
        __le32  s_snapshot_inum;        /* Inode number of active snapshot */
@@ -1113,7 +1133,8 @@ struct ext4_super_block {
        __le32  s_usr_quota_inum;       /* inode for tracking user quota */
        __le32  s_grp_quota_inum;       /* inode for tracking group quota */
        __le32  s_overhead_clusters;    /* overhead blocks/clusters in fs */
-       __le32  s_reserved[109];        /* Padding to the end of the block */
+       __le32  s_reserved[108];        /* Padding to the end of the block */
+       __le32  s_checksum;             /* crc32c(superblock) */
 };
 
 #define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
@@ -1176,6 +1197,7 @@ struct ext4_sb_info {
        struct proc_dir_entry *s_proc;
        struct kobject s_kobj;
        struct completion s_kobj_unregister;
+       struct super_block *s_sb;
 
        /* Journaling */
        struct journal_s *s_journal;
@@ -1266,6 +1288,12 @@ struct ext4_sb_info {
 
        /* record the last minlen when FITRIM is called. */
        atomic_t s_last_trim_minblks;
+
+       /* Reference to checksum algorithm driver via cryptoapi */
+       struct crypto_shash *s_chksum_driver;
+
+       /* Precomputed FS UUID checksum for seeding other checksums */
+       __u32 s_csum_seed;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1414,6 +1442,12 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE     0x0040
 #define EXT4_FEATURE_RO_COMPAT_QUOTA           0x0100
 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC                0x0200
+/*
+ * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM).  When
+ * METADATA_CSUM is set, group descriptor checksums use the same algorithm as
+ * all other data structures' checksums.  However, the METADATA_CSUM and
+ * GDT_CSUM bits are mutually exclusive.
+ */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
@@ -1461,7 +1495,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
                                         EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
                                         EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
                                         EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
-                                        EXT4_FEATURE_RO_COMPAT_BIGALLOC)
+                                        EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
+                                        EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)
 
 /*
  * Default values for user and/or group using reserved blocks
@@ -1526,6 +1561,18 @@ struct ext4_dir_entry_2 {
        char    name[EXT4_NAME_LEN];    /* File name */
 };
 
+/*
+ * This is a bogus directory entry at the end of each leaf block that
+ * records checksums.
+ */
+struct ext4_dir_entry_tail {
+       __le32  det_reserved_zero1;     /* Pretend to be unused */
+       __le16  det_rec_len;            /* 12 */
+       __u8    det_reserved_zero2;     /* Zero name length */
+       __u8    det_reserved_ft;        /* 0xDE, fake file type */
+       __le32  det_checksum;           /* crc32c(uuid+inum+dirblock) */
+};
+
 /*
  * Ext4 directory file types.  Only the low 3 bits are used.  The
  * other bits are reserved for now.
@@ -1541,6 +1588,8 @@ struct ext4_dir_entry_2 {
 
 #define EXT4_FT_MAX            8
 
+#define EXT4_FT_DIR_CSUM       0xDE
+
 /*
  * EXT4_DIR_PAD defines the directory entries boundaries
  *
@@ -1609,6 +1658,25 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 #define DX_HASH_HALF_MD4_UNSIGNED      4
 #define DX_HASH_TEA_UNSIGNED           5
 
+static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
+                             const void *address, unsigned int length)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(sbi->s_chksum_driver)];
+       } desc;
+       int err;
+
+       desc.shash.tfm = sbi->s_chksum_driver;
+       desc.shash.flags = 0;
+       *(u32 *)desc.ctx = crc;
+
+       err = crypto_shash_update(&desc.shash, address, length);
+       BUG_ON(err);
+
+       return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 /* hash info structure used by the directory hash */
@@ -1741,7 +1809,8 @@ struct mmp_struct {
        __le16  mmp_check_interval;
 
        __le16  mmp_pad1;
-       __le32  mmp_pad2[227];
+       __le32  mmp_pad2[226];
+       __le32  mmp_checksum;           /* crc32c(uuid+mmp_block) */
 };
 
 /* arguments passed to the mmp thread */
@@ -1784,8 +1853,24 @@ struct mmpd_data {
 
 /* bitmap.c */
 extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz);
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz);
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz);
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz);
 
 /* balloc.c */
+extern void ext4_validate_block_bitmap(struct super_block *sb,
+                                      struct ext4_group_desc *desc,
+                                      unsigned int block_group,
+                                      struct buffer_head *bh);
 extern unsigned int ext4_block_group(struct super_block *sb,
                        ext4_fsblk_t blocknr);
 extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
@@ -1864,7 +1949,7 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
 /* mballoc.c */
 extern long ext4_mb_stats;
 extern long ext4_mb_max_to_scan;
-extern int ext4_mb_init(struct super_block *, int);
+extern int ext4_mb_init(struct super_block *);
 extern int ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
@@ -1936,6 +2021,8 @@ extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
 extern int ext4_ext_migrate(struct inode *);
 
 /* namei.c */
+extern int ext4_dirent_csum_verify(struct inode *inode,
+                                  struct ext4_dir_entry *dirent);
 extern int ext4_orphan_add(handle_t *, struct inode *);
 extern int ext4_orphan_del(handle_t *, struct inode *);
 extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
@@ -1950,6 +2037,10 @@ extern int ext4_group_extend(struct super_block *sb,
 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern int ext4_superblock_csum_verify(struct super_block *sb,
+                                      struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb,
+                                    struct ext4_super_block *es);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
@@ -2025,10 +2116,17 @@ extern void ext4_used_dirs_set(struct super_block *sb,
                                struct ext4_group_desc *bg, __u32 count);
 extern void ext4_itable_unused_set(struct super_block *sb,
                                   struct ext4_group_desc *bg, __u32 count);
-extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group,
-                                  struct ext4_group_desc *gdp);
-extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
+extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
                                       struct ext4_group_desc *gdp);
+extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
+                                    struct ext4_group_desc *gdp);
+
+static inline int ext4_has_group_desc_csum(struct super_block *sb)
+{
+       return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+                                         EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
+}
 
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
@@ -2225,6 +2323,9 @@ static inline void ext4_unlock_group(struct super_block *sb,
 
 static inline void ext4_mark_super_dirty(struct super_block *sb)
 {
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+       ext4_superblock_csum_set(sb, es);
        if (EXT4_SB(sb)->s_journal == NULL)
                sb->s_dirt = 1;
 }
@@ -2314,6 +2415,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
 
 /* mmp.c */
 extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
+extern int ext4_mmp_csum_verify(struct super_block *sb,
+                               struct mmp_struct *mmp);
 
 /* BH_Uninit flag: blocks are allocated but uninitialized on disk */
 enum ext4_state_bits {
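
ext4_chksum() above feeds an existing checksum value into the crc32c shash state so one checksum can seed the next: the comments in this header describe s_csum_seed as a precomputed checksum of the filesystem UUID, and the later ialloc.c/inode.c hunks fold the inode number and generation on top of it to get i_csum_seed. The sketch below is a rough userspace illustration of that chaining idea with a plain bitwise CRC-32C (polynomial 0x82F63B78); the initial/final handling differs from the kernel driver, so the numbers will not match on-disk values; only the seeding pattern is the point.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* plain bitwise CRC-32C over a buffer, continuing from "crc" */
    static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        uint8_t uuid[16] = { 0x12, 0x34 };  /* pretend filesystem UUID */
        uint32_t inum = 1234, gen = 7;      /* raw bytes; assumes a little-endian host */

        /* once per mount: seed derived from the UUID (cf. s_csum_seed) */
        uint32_t fs_seed = crc32c_update(~0u, uuid, sizeof(uuid));

        /* once per inode: fold in inode number, then generation (cf. i_csum_seed) */
        uint32_t seed = crc32c_update(fs_seed, &inum, sizeof(inum));
        seed = crc32c_update(seed, &gen, sizeof(gen));

        printf("per-inode checksum seed: 0x%08x\n", (unsigned)seed);
        return 0;
    }
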
index 0f58b86e3a0206e19626f361f453aa80b2838857..cb1b2c919963290fd10d09ba12f6d8c53ace9fa6 100644 (file)
  * ext4_inode has i_block array (60 bytes total).
  * The first 12 bytes store ext4_extent_header;
  * the remainder stores an array of ext4_extent.
+ * For non-inode extent blocks, ext4_extent_tail
+ * follows the array.
  */
 
+/*
+ * This is the extent tail on-disk structure.
+ * All other extent structures are 12 bytes long.  It turns out that
+ * block_size % 12 >= 4 at least for all powers of 2 greater than 512, which
+ * covers all valid ext4 block sizes.  Therefore, this tail structure can be
+ * crammed into the end of the block without having to rebalance the tree.
+ */
+struct ext4_extent_tail {
+       __le32  et_checksum;    /* crc32c(uuid+inum+extent_block) */
+};
+
 /*
  * This is the extent on-disk structure.
  * It's used at the bottom of the tree.
@@ -101,6 +114,17 @@ struct ext4_extent_header {
 
 #define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
 
+#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
+       (sizeof(struct ext4_extent_header) + \
+        (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max)))
+
+static inline struct ext4_extent_tail *
+find_ext4_extent_tail(struct ext4_extent_header *eh)
+{
+       return (struct ext4_extent_tail *)(((void *)eh) +
+                                          EXT4_EXTENT_TAIL_OFFSET(eh));
+}
+
 /*
  * Array of ext4_ext_path contains path to some extent.
  * Creation/lookup routines use it for traversal/splitting/etc.
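
The new ext4_extent_tail above leans on the observation in its comment: every other extent-tree record is 12 bytes, and for every power-of-two block size of 1024 or more the remainder block_size % 12 is at least 4, so the 4-byte checksum tail always fits after eh_max entries without shrinking the entry count. EXT4_EXTENT_TAIL_OFFSET is simply the header size plus eh_max * 12. A quick standalone check of both facts; the sizes are written out literally rather than taken from kernel headers.

    #include <stdio.h>

    int main(void)
    {
        /* on-disk record sizes from the ext4 extent layout */
        const unsigned hdr = 12, entry = 12, tail = 4;

        for (unsigned bs = 1024; bs <= 65536; bs *= 2) {
            unsigned eh_max = (bs - hdr) / entry;       /* entries that fit in the block */
            unsigned tail_off = hdr + eh_max * entry;   /* EXT4_EXTENT_TAIL_OFFSET */

            printf("bs=%5u  bs%%12=%2u  eh_max=%4u  tail at %5u, %u byte(s) spare\n",
                   bs, bs % 12, eh_max, tail_off, bs - tail_off);
            if (bs - tail_off < tail)
                printf("  -> the 4-byte tail would NOT fit!\n");
        }
        return 0;
    }
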
index aca17901758249d4329d780714e328ef42851e35..90f7c2e84db1bef3fdb90931f9124cfe052705dd 100644 (file)
@@ -138,16 +138,23 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
 }
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb)
+                             handle_t *handle, struct super_block *sb,
+                             int now)
 {
        struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
        int err = 0;
 
        if (ext4_handle_valid(handle)) {
+               ext4_superblock_csum_set(sb,
+                               (struct ext4_super_block *)bh->b_data);
                err = jbd2_journal_dirty_metadata(handle, bh);
                if (err)
                        ext4_journal_abort_handle(where, line, __func__,
                                                  bh, handle, err);
+       } else if (now) {
+               ext4_superblock_csum_set(sb,
+                               (struct ext4_super_block *)bh->b_data);
+               mark_buffer_dirty(bh);
        } else
                sb->s_dirt = 1;
        return err;
index 83b20fcf9400b11b28185470f8feef309faa8252..f440e8f1841f4e2521486bd94ae19ed83aa896ab 100644 (file)
@@ -213,7 +213,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 struct buffer_head *bh);
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb);
+                             handle_t *handle, struct super_block *sb,
+                             int now);
 
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
@@ -225,8 +226,10 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
        __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
                                     (bh))
+#define ext4_handle_dirty_super_now(handle, sb) \
+       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1)
 #define ext4_handle_dirty_super(handle, sb) \
-       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
+       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0)
 
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
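
ext4_handle_dirty_super() above gains a *_now variant, and both remain thin macros around __ext4_handle_dirty_super() that pass __func__ and __LINE__ so diagnostics name the real call site rather than the helper. A minimal generic sketch of that caller-location wrapper pattern; the function and macro names here are made up.

    #include <stdio.h>

    /* the real worker takes the caller's location as explicit arguments */
    static int __mark_dirty(const char *where, unsigned int line, int now)
    {
        if (now)
            printf("%s:%u: flushing immediately\n", where, line);
        else
            printf("%s:%u: deferring writeback\n", where, line);
        return 0;
    }

    /* thin wrappers: every call site is stamped with its own __func__/__LINE__ */
    #define mark_dirty(now)     __mark_dirty(__func__, __LINE__, (now))
    #define mark_dirty_now()    __mark_dirty(__func__, __LINE__, 1)

    int main(void)
    {
        mark_dirty(0);
        mark_dirty_now();
        return 0;
    }
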
index abcdeab67f5232b66d4aa5a6cbb88838094f6247..91341ec6e06a94f2f400d10039a64585cd17ed2e 100644 (file)
 #define EXT4_EXT_MARK_UNINIT1  0x2  /* mark first half uninitialized */
 #define EXT4_EXT_MARK_UNINIT2  0x4  /* mark second half uninitialized */
 
+static __le32 ext4_extent_block_csum(struct inode *inode,
+                                    struct ext4_extent_header *eh)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
+                          EXT4_EXTENT_TAIL_OFFSET(eh));
+       return cpu_to_le32(csum);
+}
+
+static int ext4_extent_block_csum_verify(struct inode *inode,
+                                        struct ext4_extent_header *eh)
+{
+       struct ext4_extent_tail *et;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       et = find_ext4_extent_tail(eh);
+       if (et->et_checksum != ext4_extent_block_csum(inode, eh))
+               return 0;
+       return 1;
+}
+
+static void ext4_extent_block_csum_set(struct inode *inode,
+                                      struct ext4_extent_header *eh)
+{
+       struct ext4_extent_tail *et;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       et = find_ext4_extent_tail(eh);
+       et->et_checksum = ext4_extent_block_csum(inode, eh);
+}
+
 static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
@@ -117,6 +157,7 @@ static int __ext4_ext_dirty(const char *where, unsigned int line,
 {
        int err;
        if (path->p_bh) {
+               ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
@@ -391,6 +432,12 @@ static int __ext4_ext_check(const char *function, unsigned int line,
                error_msg = "invalid extent entries";
                goto corrupted;
        }
+       /* Verify checksum on non-root extent tree nodes */
+       if (ext_depth(inode) != depth &&
+           !ext4_extent_block_csum_verify(inode, eh)) {
+               error_msg = "extent tree corrupted";
+               goto corrupted;
+       }
        return 0;
 
 corrupted:
@@ -412,6 +459,26 @@ int ext4_ext_check_inode(struct inode *inode)
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
 }
 
+static int __ext4_ext_check_block(const char *function, unsigned int line,
+                                 struct inode *inode,
+                                 struct ext4_extent_header *eh,
+                                 int depth,
+                                 struct buffer_head *bh)
+{
+       int ret;
+
+       if (buffer_verified(bh))
+               return 0;
+       ret = ext4_ext_check(inode, eh, depth);
+       if (ret)
+               return ret;
+       set_buffer_verified(bh);
+       return ret;
+}
+
+#define ext4_ext_check_block(inode, eh, depth, bh)     \
+       __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 {
@@ -536,7 +603,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
        }
 
        path->p_idx = l - 1;
-       ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
+       ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));
 
 #ifdef CHECK_BINSEARCH
@@ -668,8 +735,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
        i = depth;
        /* walk through the tree */
        while (i) {
-               int need_to_validate = 0;
-
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
 
@@ -688,8 +753,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                put_bh(bh);
                                goto err;
                        }
-                       /* validate the extent entries */
-                       need_to_validate = 1;
                }
                eh = ext_block_hdr(bh);
                ppos++;
@@ -703,7 +766,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                path[ppos].p_hdr = eh;
                i--;
 
-               if (need_to_validate && ext4_ext_check(inode, eh, i))
+               if (ext4_ext_check_block(inode, eh, i, bh))
                        goto err;
        }
 
@@ -914,6 +977,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                le16_add_cpu(&neh->eh_entries, m);
        }
 
+       ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
@@ -992,6 +1056,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
+               ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
 
@@ -1089,6 +1154,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
+       ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
@@ -1344,7 +1410,8 @@ got_index:
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
-               if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+               if (ext4_ext_check_block(inode, eh,
+                                        path->p_depth - depth, bh)) {
                        put_bh(bh);
                        return -EIO;
                }
@@ -1357,7 +1424,7 @@ got_index:
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
-       if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+       if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
                put_bh(bh);
                return -EIO;
        }
@@ -2644,8 +2711,8 @@ cont:
                                err = -EIO;
                                break;
                        }
-                       if (ext4_ext_check(inode, ext_block_hdr(bh),
-                                                       depth - i - 1)) {
+                       if (ext4_ext_check_block(inode, ext_block_hdr(bh),
+                                                       depth - i - 1, bh)) {
                                err = -EIO;
                                break;
                        }
@@ -4722,8 +4789,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 
        /* Now release the pages */
        if (last_page_offset > first_page_offset) {
-               truncate_inode_pages_range(mapping, first_page_offset,
-                                          last_page_offset-1);
+               truncate_pagecache_range(inode, first_page_offset,
+                                        last_page_offset - 1);
        }
 
        /* finish any pending end_io work */
index cb70f1812a70f5ca8452e98776cd309ad6638055..8c7642a00054fd1ddf649e733e4b6efb5a0eb14b 100644 (file)
@@ -95,7 +95,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int unaligned_aio = 0;
-       int ret;
+       ssize_t ret;
 
        /*
         * If we have encountered a bitmap-format file, the size limit
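
The one-line change above turns the local ret in ext4_file_write() from int into ssize_t: the write path it calls returns ssize_t, and keeping that result in an int risks truncating or mis-signing a large value on 64-bit builds. A tiny userspace illustration of the truncation, assuming a 64-bit host where ssize_t is 64 bits wide.

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        ssize_t written = 3000000000LL;     /* a byte count above INT_MAX */
        int truncated = (int)written;       /* what an "int ret" would hold */

        printf("ssize_t: %zd\n", written);
        printf("int:     %d\n", truncated); /* typically wraps to a negative value */
        return 0;
    }
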
index 9f9acac6c43f4ac8006e363b5567b4a88dd56615..d48e8b14928cf993c50c33fe9b18a90203c2c492 100644 (file)
@@ -70,24 +70,27 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
 {
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-
        J_ASSERT_BH(bh, buffer_locked(bh));
 
        /* If checksum is bad mark all blocks and inodes used to prevent
         * allocation, essentially implementing a per-group read-only flag. */
-       if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+       if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_group_clusters_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
+               ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8);
                return 0;
        }
 
        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);
+       ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
 
        return EXT4_INODES_PER_GROUP(sb);
 }
@@ -128,12 +131,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                return NULL;
        }
        if (bitmap_uptodate(bh))
-               return bh;
+               goto verify;
 
        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
 
        ext4_lock_group(sb, block_group);
@@ -141,6 +144,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
+               set_buffer_verified(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
@@ -154,7 +158,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        /*
         * submit the buffer_head for reading
@@ -171,6 +175,20 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                           block_group, bitmap_blk);
                return NULL;
        }
+
+verify:
+       ext4_lock_group(sb, block_group);
+       if (!buffer_verified(bh) &&
+           !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
+               ext4_unlock_group(sb, block_group);
+               put_bh(bh);
+               ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
+                          "inode_bitmap = %llu", block_group, bitmap_blk);
+               return NULL;
+       }
+       ext4_unlock_group(sb, block_group);
+       set_buffer_verified(bh);
        return bh;
 }
 
@@ -276,7 +294,9 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                ext4_used_dirs_set(sb, gdp, count);
                percpu_counter_dec(&sbi->s_dirs_counter);
        }
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+       ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
 
        percpu_counter_inc(&sbi->s_freeinodes_counter);
@@ -488,10 +508,12 @@ fallback_retry:
        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
-               grp_free = ext4_free_inodes_count(sb, desc);
-               if (desc && grp_free && grp_free >= avefreei) {
-                       *group = grp;
-                       return 0;
+               if (desc) {
+                       grp_free = ext4_free_inodes_count(sb, desc);
+                       if (grp_free && grp_free >= avefreei) {
+                               *group = grp;
+                               return 0;
+                       }
                }
        }
 
@@ -709,7 +731,7 @@ repeat_in_this_group:
 
 got:
        /* We may have to initialize the block bitmap if it isn't already */
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+       if (ext4_has_group_desc_csum(sb) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bitmap_bh;
 
@@ -731,8 +753,11 @@ got:
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_group_clusters_set(sb, gdp,
                                ext4_free_clusters_after_init(sb, group, gdp));
-                       gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
-                                                               gdp);
+                       ext4_block_bitmap_csum_set(sb, group, gdp,
+                                                  block_bitmap_bh,
+                                                  EXT4_BLOCKS_PER_GROUP(sb) /
+                                                  8);
+                       ext4_group_desc_csum_set(sb, group, gdp);
                }
                ext4_unlock_group(sb, group);
 
@@ -751,7 +776,7 @@ got:
                goto fail;
 
        /* Update the relevant bg descriptor fields */
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+       if (ext4_has_group_desc_csum(sb)) {
                int free;
                struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 
@@ -772,7 +797,10 @@ got:
                        ext4_itable_unused_set(sb, gdp,
                                        (EXT4_INODES_PER_GROUP(sb) - ino));
                up_read(&grp->alloc_sem);
+       } else {
+               ext4_lock_group(sb, group);
        }
+
        ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
        if (S_ISDIR(mode)) {
                ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
@@ -782,10 +810,12 @@ got:
                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
                }
        }
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-               gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
-               ext4_unlock_group(sb, group);
+       if (ext4_has_group_desc_csum(sb)) {
+               ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8);
+               ext4_group_desc_csum_set(sb, group, gdp);
        }
+       ext4_unlock_group(sb, group);
 
        BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
@@ -850,6 +880,19 @@ got:
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
 
+       /* Precompute checksum seed for inode metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               __u32 csum;
+               struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+               __le32 inum = cpu_to_le32(inode->i_ino);
+               __le32 gen = cpu_to_le32(inode->i_generation);
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+                                  sizeof(inum));
+               ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+                                             sizeof(gen));
+       }
+
        ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
        ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
@@ -1140,7 +1183,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 skip_zeroout:
        ext4_lock_group(sb, group);
        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+       ext4_group_desc_csum_set(sb, group, gdp);
        ext4_unlock_group(sb, group);
 
        BUFFER_TRACE(group_desc_bh,
index 07eaf565fdcb2ad4fba4f92c6fe55a01b2fea17b..02bc8cbe7281b3d47c3449a1c4b8e4220685ba52 100644 (file)
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
+static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+                             struct ext4_inode_info *ei)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       __u16 csum_lo;
+       __u16 csum_hi = 0;
+       __u32 csum;
+
+       csum_lo = raw->i_checksum_lo;
+       raw->i_checksum_lo = 0;
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+               csum_hi = raw->i_checksum_hi;
+               raw->i_checksum_hi = 0;
+       }
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
+                          EXT4_INODE_SIZE(inode->i_sb));
+
+       raw->i_checksum_lo = csum_lo;
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               raw->i_checksum_hi = csum_hi;
+
+       return csum;
+}
+
+static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+                                 struct ext4_inode_info *ei)
+{
+       __u32 provided, calculated;
+
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_LINUX) ||
+           !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(raw->i_checksum_lo);
+       calculated = ext4_inode_csum(inode, raw, ei);
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
+       else
+               calculated &= 0xFFFF;
+
+       return provided == calculated;
+}
+
+static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+                               struct ext4_inode_info *ei)
+{
+       __u32 csum;
+
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_LINUX) ||
+           !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_inode_csum(inode, raw, ei);
+       raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               raw->i_checksum_hi = cpu_to_le16(csum >> 16);
+}
+
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
 {
@@ -3517,8 +3584,7 @@ make_io:
                                b = table;
                        end = b + EXT4_SB(sb)->s_inode_readahead_blks;
                        num = EXT4_INODES_PER_GROUP(sb);
-                       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                      EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+                       if (ext4_has_group_desc_csum(sb))
                                num -= ext4_itable_unused_count(sb, gdp);
                        table += num / inodes_per_block;
                        if (end > table)
@@ -3646,6 +3712,39 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        if (ret < 0)
                goto bad_inode;
        raw_inode = ext4_raw_inode(&iloc);
+
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+                   EXT4_INODE_SIZE(inode->i_sb)) {
+                       EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
+                               EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
+                               EXT4_INODE_SIZE(inode->i_sb));
+                       ret = -EIO;
+                       goto bad_inode;
+               }
+       } else
+               ei->i_extra_isize = 0;
+
+       /* Precompute checksum seed for inode metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+               __u32 csum;
+               __le32 inum = cpu_to_le32(inode->i_ino);
+               __le32 gen = raw_inode->i_generation;
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+                                  sizeof(inum));
+               ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+                                             sizeof(gen));
+       }
+
+       if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
+               EXT4_ERROR_INODE(inode, "checksum invalid");
+               ret = -EIO;
+               goto bad_inode;
+       }
+
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
@@ -3725,12 +3824,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        }
 
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
-               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-                   EXT4_INODE_SIZE(inode->i_sb)) {
-                       ret = -EIO;
-                       goto bad_inode;
-               }
                if (ei->i_extra_isize == 0) {
                        /* The extra space is currently unused. Use it. */
                        ei->i_extra_isize = sizeof(struct ext4_inode) -
@@ -3742,8 +3835,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
                                ext4_set_inode_state(inode, EXT4_STATE_XATTR);
                }
-       } else
-               ei->i_extra_isize = 0;
+       }
 
        EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
        EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
@@ -3942,7 +4034,7 @@ static int ext4_do_update_inode(handle_t *handle,
                        EXT4_SET_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
                        ext4_handle_sync(handle);
-                       err = ext4_handle_dirty_super(handle, sb);
+                       err = ext4_handle_dirty_super_now(handle, sb);
                }
        }
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -3969,6 +4061,8 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
        }
 
+       ext4_inode_csum_set(inode, raw_inode, ei);
+
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
        rc = ext4_handle_dirty_metadata(handle, NULL, bh);
        if (!err)
@@ -4213,7 +4307,8 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * will return the blocks that include the delayed allocation
         * blocks for this file.
         */
-       delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+       delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
+                               EXT4_I(inode)->i_reserved_data_blocks);
 
        stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
        return 0;
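
ext4_inode_csum() above checksums the entire on-disk inode, so the checksum fields themselves must not contribute: it saves and zeroes i_checksum_lo (and i_checksum_hi when the inode is large enough), computes the crc32c, then restores the saved values. A compact standalone sketch of that zero-compute-restore idiom over an arbitrary struct; the hash used here is a toy placeholder, not crc32c.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct record {
        uint32_t payload[4];
        uint16_t checksum_lo;       /* must not feed into its own checksum */
        uint16_t checksum_hi;
    };

    /* toy placeholder hash: shift-and-xor over the bytes */
    static uint32_t toy_csum(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t acc = 0;

        for (size_t i = 0; i < len; i++)
            acc = (acc << 1) ^ p[i];
        return acc;
    }

    static uint32_t record_csum(struct record *r)
    {
        uint16_t lo = r->checksum_lo, hi = r->checksum_hi;
        uint32_t csum;

        r->checksum_lo = 0;         /* blank the fields being protected */
        r->checksum_hi = 0;
        csum = toy_csum(r, sizeof(*r));
        r->checksum_lo = lo;        /* restore whatever was stored */
        r->checksum_hi = hi;
        return csum;
    }

    int main(void)
    {
        struct record r = { { 1, 2, 3, 4 }, 0, 0 };
        uint32_t csum = record_csum(&r);

        r.checksum_lo = (uint16_t)(csum & 0xFFFF);
        r.checksum_hi = (uint16_t)(csum >> 16);
        printf("stored 0x%08x; verify: %s\n", (unsigned)csum,
               record_csum(&r) == csum ? "ok" : "mismatch");
        return 0;
    }
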
index 6eee25591b8159bc96d35a16f94f94c0855a35b9..6ec6f9ee2fec9099c87f523bbf69614eed789050 100644 (file)
@@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                handle_t *handle = NULL;
                int err, migrate = 0;
                struct ext4_iloc iloc;
-               unsigned int oldflags;
+               unsigned int oldflags, mask, i;
                unsigned int jflag;
 
                if (!inode_owner_or_capable(inode))
@@ -115,9 +115,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (err)
                        goto flags_err;
 
-               flags = flags & EXT4_FL_USER_MODIFIABLE;
-               flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
-               ei->i_flags = flags;
+               for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
+                       if (!(mask & EXT4_FL_USER_MODIFIABLE))
+                               continue;
+                       if (mask & flags)
+                               ext4_set_inode_flag(inode, i);
+                       else
+                               ext4_clear_inode_flag(inode, i);
+               }
 
                ext4_set_inode_flags(inode);
                inode->i_ctime = ext4_current_time(inode);
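
The EXT4_IOC_SETFLAGS hunk above stops assigning ei->i_flags wholesale and instead walks all 32 bits, calling ext4_set_inode_flag()/ext4_clear_inode_flag() only for bits covered by EXT4_FL_USER_MODIFIABLE. For a plain word, the effect on the user-modifiable bits is the familiar masked merge; the small check below confirms the per-bit loop and the mask expression agree (the mask value is made up for the demo). The per-bit form presumably exists so each change goes through the inode-flag helpers rather than a blind store.

    #include <stdint.h>
    #include <stdio.h>

    #define USER_MODIFIABLE 0x000000FFu     /* made-up mask for the demo */

    int main(void)
    {
        uint32_t oldflags = 0xABCD1234;     /* mixes user and kernel-owned bits */
        uint32_t requested = 0x000000F0;    /* what userspace asked for */

        /* per-bit update, as in the ioctl hunk */
        uint32_t loop_result = oldflags;
        for (unsigned i = 0; i < 32; i++) {
            uint32_t mask = 1u << i;
            if (!(mask & USER_MODIFIABLE))
                continue;                   /* never touch kernel-owned bits */
            if (requested & mask)
                loop_result |= mask;
            else
                loop_result &= ~mask;
        }

        /* equivalent masked merge for a plain word */
        uint32_t merge_result = (requested & USER_MODIFIABLE) |
                                (oldflags & ~USER_MODIFIABLE);

        printf("loop  = 0x%08x\nmerge = 0x%08x\n",
               (unsigned)loop_result, (unsigned)merge_result);
        return 0;
    }
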
@@ -152,6 +157,13 @@ flags_out:
                if (!inode_owner_or_capable(inode))
                        return -EPERM;
 
+               if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+                       ext4_warning(sb, "Setting inode version is not "
+                                    "supported with metadata_csum enabled.");
+                       return -ENOTTY;
+               }
+
                err = mnt_want_write_file(filp);
                if (err)
                        return err;
@@ -256,7 +268,6 @@ group_extend_out:
                err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
-               mnt_drop_write(filp->f_path.mnt);
 
                if (copy_to_user((struct move_extent __user *)arg,
                                 &me, sizeof(me)))
index 99ab428bcfa089822e74b433aee7b1bf4076e34d..1cd6994fc446008b74dc9b77863edf0f24e14c33 100644 (file)
@@ -788,7 +788,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
-       struct buffer_head **bh;
+       struct buffer_head **bh = NULL;
        struct inode *inode;
        char *data;
        char *bitmap;
@@ -2375,7 +2375,7 @@ static int ext4_groupinfo_create_slab(size_t size)
        return 0;
 }
 
-int ext4_mb_init(struct super_block *sb, int needs_recovery)
+int ext4_mb_init(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned i, j;
@@ -2517,6 +2517,9 @@ int ext4_mb_release(struct super_block *sb)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
+       if (sbi->s_proc)
+               remove_proc_entry("mb_groups", sbi->s_proc);
+
        if (sbi->s_group_info) {
                for (i = 0; i < ngroups; i++) {
                        grinfo = ext4_get_group_info(sb, i);
@@ -2564,8 +2567,6 @@ int ext4_mb_release(struct super_block *sb)
        }
 
        free_percpu(sbi->s_locality_groups);
-       if (sbi->s_proc)
-               remove_proc_entry("mb_groups", sbi->s_proc);
 
        return 0;
 }
@@ -2797,7 +2798,9 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        }
        len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
        ext4_free_group_clusters_set(sb, gdp, len);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+       ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
 
        ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
        percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
@@ -3071,13 +3074,9 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
        struct ext4_prealloc_space *pa = ac->ac_pa;
-       int len;
-
-       if (pa && pa->pa_type == MB_INODE_PA) {
-               len = ac->ac_b_ex.fe_len;
-               pa->pa_free += len;
-       }
 
+       if (pa && pa->pa_type == MB_INODE_PA)
+               pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
 /*
@@ -4636,6 +4635,7 @@ do_more:
                 */
                new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
                if (!new_entry) {
+                       ext4_mb_unload_buddy(&e4b);
                        err = -ENOMEM;
                        goto error_return;
                }
@@ -4659,7 +4659,9 @@ do_more:
 
        ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
        ext4_free_group_clusters_set(sb, gdp, ret);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+       ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
@@ -4803,7 +4805,9 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
        mb_free_blocks(NULL, &e4b, bit, count);
        blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
        ext4_free_group_clusters_set(sb, desc, blk_free_count);
-       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+       ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, desc);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeclusters_counter,
                           EXT4_B2C(sbi, blocks_freed));
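
The one-line ext4_mb_unload_buddy() call added in the do_more hunk above is an error-path fix: the buddy context loaded earlier in the function was not being released when the kmem_cache_alloc() of the free-data entry failed. The userspace sketch below only illustrates the shape of that fix; struct buddy, load_buddy() and free_blocks() are invented names for the example, not ext4 code.

#include <stdio.h>
#include <stdlib.h>

/* Toy resource standing in for struct ext4_buddy. */
struct buddy {
    char *bitmap;
};

static int load_buddy(struct buddy *b)
{
    b->bitmap = malloc(4096);
    return b->bitmap ? 0 : -1;
}

static void unload_buddy(struct buddy *b)
{
    free(b->bitmap);
    b->bitmap = NULL;
}

/* Once the buddy is loaded, every later failure branch must unload it
 * before jumping to the shared error label, or the reference (here,
 * the allocation) leaks -- which is what the hunk restores. */
static int free_blocks(int fail_alloc)
{
    struct buddy e4b;
    void *new_entry;
    int err = 0;

    if (load_buddy(&e4b))
        return -1;

    new_entry = fail_alloc ? NULL : malloc(64);
    if (!new_entry) {
        unload_buddy(&e4b);    /* the line the hunk adds */
        err = -1;
        goto error_return;
    }

    /* ... the real function would record the freed extent here ... */
    free(new_entry);
    unload_buddy(&e4b);

error_return:
    return err;
}

int main(void)
{
    printf("normal path:  %d\n", free_blocks(0));
    printf("failure path: %d\n", free_blocks(1));
    return 0;
}
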
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index ed6548d89165e1d9c31118aca21d3e89a3772ab2..f99a1311e84765296b0a0a04534e0be0536915bc 100644
@@ -6,12 +6,45 @@
 
 #include "ext4.h"
 
+/* Checksumming functions */
+static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int offset = offsetof(struct mmp_struct, mmp_checksum);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+
+       return cpu_to_le32(csum);
+}
+
+int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+}
+
+void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+}
+
 /*
  * Write the MMP block using WRITE_SYNC to try to get the block on-disk
  * faster.
  */
-static int write_mmp_block(struct buffer_head *bh)
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 {
+       struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+
+       ext4_mmp_csum_set(sb, mmp);
        mark_buffer_dirty(bh);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
@@ -59,7 +92,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
        }
 
        mmp = (struct mmp_struct *)((*bh)->b_data);
-       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
+           !ext4_mmp_csum_verify(sb, mmp))
                return -EINVAL;
 
        return 0;
@@ -120,7 +154,7 @@ static int kmmpd(void *data)
                mmp->mmp_time = cpu_to_le64(get_seconds());
                last_update_time = jiffies;
 
-               retval = write_mmp_block(bh);
+               retval = write_mmp_block(sb, bh);
                /*
                 * Don't spew too many error messages. Print one every
                 * (s_mmp_update_interval * 60) seconds.
@@ -200,7 +234,7 @@ static int kmmpd(void *data)
        mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
        mmp->mmp_time = cpu_to_le64(get_seconds());
 
-       retval = write_mmp_block(bh);
+       retval = write_mmp_block(sb, bh);
 
 failed:
        kfree(data);
@@ -299,7 +333,7 @@ skip:
        seq = mmp_new_seq();
        mmp->mmp_seq = cpu_to_le32(seq);
 
-       retval = write_mmp_block(bh);
+       retval = write_mmp_block(sb, bh);
        if (retval)
                goto failed;
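
The new ext4_mmp_csum() above checksums the MMP block only up to offsetof(struct mmp_struct, mmp_checksum), and the _verify()/_set() helpers quietly do nothing unless the metadata_csum feature is enabled. A rough userspace illustration of that offsetof-bounded pattern follows; the toy structure, the seed and the bitwise crc32c_update() helper are assumptions of the sketch rather than the kernel's crc32c driver, so the values it produces are not on-disk values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Plain bitwise CRC-32C (Castagnoli) update, no final inversion;
 * an illustrative stand-in for what ext4_chksum() wraps. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;

    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1U));
    }
    return crc;
}

/* Toy stand-in for struct mmp_struct: the checksum is the last field,
 * so the CRC simply covers everything that precedes it. */
struct toy_mmp {
    uint32_t magic;
    uint32_t seq;
    uint64_t time;
    uint32_t checksum;
};

static uint32_t toy_mmp_csum(uint32_t seed, const struct toy_mmp *mmp)
{
    return crc32c_update(seed, mmp, offsetof(struct toy_mmp, checksum));
}

int main(void)
{
    struct toy_mmp mmp = { .magic = 0xABCD1234, .seq = 7, .time = 0 };
    uint32_t seed = ~0U;    /* ext4 derives its seed from the fs UUID */

    mmp.checksum = toy_mmp_csum(seed, &mmp);    /* the _set() step */
    printf("verify: %s\n",                      /* the _verify() step */
           mmp.checksum == toy_mmp_csum(seed, &mmp) ? "ok" : "bad");
    return 0;
}
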
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e2a3f4b0ff78d6f81fbf2228f12f201e6ab1a024..5845cd97bf8b094b0fc01082279e8d65ee73f241 100644
@@ -145,6 +145,14 @@ struct dx_map_entry
        u16 size;
 };
 
+/*
+ * This goes at the end of each htree block.
+ */
+struct dx_tail {
+       u32 dt_reserved;
+       __le32 dt_checksum;     /* crc32c(uuid+inum+dirblock) */
+};
+
 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
 static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
 static inline unsigned dx_get_hash(struct dx_entry *entry);
@@ -180,6 +188,230 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                             struct inode *inode);
 
+/* checksumming functions */
+#define EXT4_DIRENT_TAIL(block, blocksize) \
+       ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
+                                       ((blocksize) - \
+                                        sizeof(struct ext4_dir_entry_tail))))
+
+static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+                                  unsigned int blocksize)
+{
+       memset(t, 0, sizeof(struct ext4_dir_entry_tail));
+       t->det_rec_len = ext4_rec_len_to_disk(
+                       sizeof(struct ext4_dir_entry_tail), blocksize);
+       t->det_reserved_ft = EXT4_FT_DIR_CSUM;
+}
+
+/* Walk through a dirent block to find a checksum "dirent" at the tail */
+static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+                                                  struct ext4_dir_entry *de)
+{
+       struct ext4_dir_entry_tail *t;
+
+#ifdef PARANOID
+       struct ext4_dir_entry *d, *top;
+
+       d = de;
+       top = (struct ext4_dir_entry *)(((void *)de) +
+               (EXT4_BLOCK_SIZE(inode->i_sb) -
+               sizeof(struct ext4_dir_entry_tail)));
+       while (d < top && d->rec_len)
+               d = (struct ext4_dir_entry *)(((void *)d) +
+                   le16_to_cpu(d->rec_len));
+
+       if (d != top)
+               return NULL;
+
+       t = (struct ext4_dir_entry_tail *)d;
+#else
+       t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
+#endif
+
+       if (t->det_reserved_zero1 ||
+           le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+           t->det_reserved_zero2 ||
+           t->det_reserved_ft != EXT4_FT_DIR_CSUM)
+               return NULL;
+
+       return t;
+}
+
+static __le32 ext4_dirent_csum(struct inode *inode,
+                              struct ext4_dir_entry *dirent, int size)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+       return cpu_to_le32(csum);
+}
+
+int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+       struct ext4_dir_entry_tail *t;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       t = get_dirent_tail(inode, dirent);
+       if (!t) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+                                "leaf for checksum.  Please run e2fsck -D.");
+               return 0;
+       }
+
+       if (t->det_checksum != ext4_dirent_csum(inode, dirent,
+                                               (void *)t - (void *)dirent))
+               return 0;
+
+       return 1;
+}
+
+static void ext4_dirent_csum_set(struct inode *inode,
+                                struct ext4_dir_entry *dirent)
+{
+       struct ext4_dir_entry_tail *t;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       t = get_dirent_tail(inode, dirent);
+       if (!t) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+                                "leaf for checksum.  Please run e2fsck -D.");
+               return;
+       }
+
+       t->det_checksum = ext4_dirent_csum(inode, dirent,
+                                          (void *)t - (void *)dirent);
+}
+
+static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
+                                               struct inode *inode,
+                                               struct buffer_head *bh)
+{
+       ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
+static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+                                              struct ext4_dir_entry *dirent,
+                                              int *offset)
+{
+       struct ext4_dir_entry *dp;
+       struct dx_root_info *root;
+       int count_offset;
+
+       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+               count_offset = 8;
+       else if (le16_to_cpu(dirent->rec_len) == 12) {
+               dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
+               if (le16_to_cpu(dp->rec_len) !=
+                   EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+                       return NULL;
+               root = (struct dx_root_info *)(((void *)dp + 12));
+               if (root->reserved_zero ||
+                   root->info_length != sizeof(struct dx_root_info))
+                       return NULL;
+               count_offset = 32;
+       } else
+               return NULL;
+
+       if (offset)
+               *offset = count_offset;
+       return (struct dx_countlimit *)(((void *)dirent) + count_offset);
+}
+
+static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
+                          int count_offset, int count, struct dx_tail *t)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum, old_csum;
+       int size;
+
+       size = count_offset + (count * sizeof(struct dx_entry));
+       old_csum = t->dt_checksum;
+       t->dt_checksum = 0;
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+       csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
+       t->dt_checksum = old_csum;
+
+       return cpu_to_le32(csum);
+}
+
+static int ext4_dx_csum_verify(struct inode *inode,
+                              struct ext4_dir_entry *dirent)
+{
+       struct dx_countlimit *c;
+       struct dx_tail *t;
+       int count_offset, limit, count;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       c = get_dx_countlimit(inode, dirent, &count_offset);
+       if (!c) {
+               EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+               return 1;
+       }
+       limit = le16_to_cpu(c->limit);
+       count = le16_to_cpu(c->count);
+       if (count_offset + (limit * sizeof(struct dx_entry)) >
+           EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+                                "tree checksum found.  Run e2fsck -D.");
+               return 1;
+       }
+       t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+       if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
+                                           count, t))
+               return 0;
+       return 1;
+}
+
+static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+       struct dx_countlimit *c;
+       struct dx_tail *t;
+       int count_offset, limit, count;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       c = get_dx_countlimit(inode, dirent, &count_offset);
+       if (!c) {
+               EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+               return;
+       }
+       limit = le16_to_cpu(c->limit);
+       count = le16_to_cpu(c->count);
+       if (count_offset + (limit * sizeof(struct dx_entry)) >
+           EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+                                "tree checksum.  Run e2fsck -D.");
+               return;
+       }
+       t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+       t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
+}
+
+static inline int ext4_handle_dirty_dx_node(handle_t *handle,
+                                           struct inode *inode,
+                                           struct buffer_head *bh)
+{
+       ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 /*
  * p is at least 6 bytes before the end of page
  */
@@ -239,12 +471,20 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
 {
        unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
                EXT4_DIR_REC_LEN(2) - infosize;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               entry_space -= sizeof(struct dx_tail);
        return entry_space / sizeof(struct dx_entry);
 }
 
 static inline unsigned dx_node_limit(struct inode *dir)
 {
        unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               entry_space -= sizeof(struct dx_tail);
        return entry_space / sizeof(struct dx_entry);
 }
 
@@ -390,6 +630,15 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                goto fail;
        }
 
+       if (!buffer_verified(bh) &&
+           !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
+               ext4_warning(dir->i_sb, "Root failed checksum");
+               brelse(bh);
+               *err = ERR_BAD_DX_DIR;
+               goto fail;
+       }
+       set_buffer_verified(bh);
+
        entries = (struct dx_entry *) (((char *)&root->info) +
                                       root->info.info_length);
 
@@ -450,6 +699,17 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
                        goto fail2;
                at = entries = ((struct dx_node *) bh->b_data)->entries;
+
+               if (!buffer_verified(bh) &&
+                   !ext4_dx_csum_verify(dir,
+                                        (struct ext4_dir_entry *)bh->b_data)) {
+                       ext4_warning(dir->i_sb, "Node failed checksum");
+                       brelse(bh);
+                       *err = ERR_BAD_DX_DIR;
+                       goto fail;
+               }
+               set_buffer_verified(bh);
+
                if (dx_get_limit(entries) != dx_node_limit (dir)) {
                        ext4_warning(dir->i_sb,
                                     "dx entry: limit != node limit");
@@ -549,6 +809,15 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
                if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
                                      0, &err)))
                        return err; /* Failure */
+
+               if (!buffer_verified(bh) &&
+                   !ext4_dx_csum_verify(dir,
+                                        (struct ext4_dir_entry *)bh->b_data)) {
+                       ext4_warning(dir->i_sb, "Node failed checksum");
+                       return -EIO;
+               }
+               set_buffer_verified(bh);
+
                p++;
                brelse(p->bh);
                p->bh = bh;
@@ -577,6 +846,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
        if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
                return err;
 
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+               return -EIO;
+       set_buffer_verified(bh);
+
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        top = (struct ext4_dir_entry_2 *) ((char *) de +
                                           dir->i_sb->s_blocksize -
@@ -936,6 +1210,15 @@ restart:
                        brelse(bh);
                        goto next;
                }
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_INODE(dir, "checksumming directory "
+                                        "block %lu", (unsigned long)block);
+                       brelse(bh);
+                       goto next;
+               }
+               set_buffer_verified(bh);
                i = search_dirblock(bh, dir, d_name,
                            block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
                if (i == 1) {
@@ -987,6 +1270,16 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
                if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
                        goto errout;
 
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_INODE(dir, "checksumming directory "
+                                        "block %lu", (unsigned long)block);
+                       brelse(bh);
+                       *err = -EIO;
+                       goto errout;
+               }
+               set_buffer_verified(bh);
                retval = search_dirblock(bh, dir, d_name,
                                         block << EXT4_BLOCK_SIZE_BITS(sb),
                                         res_dir);
@@ -1037,6 +1330,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
                        EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
                        return ERR_PTR(-EIO);
                }
+               if (unlikely(ino == dir->i_ino)) {
+                       EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
+                                        dentry->d_name.len,
+                                        dentry->d_name.name);
+                       return ERR_PTR(-EIO);
+               }
                inode = ext4_iget(dir->i_sb, ino);
                if (inode == ERR_PTR(-ESTALE)) {
                        EXT4_ERROR_INODE(dir,
@@ -1156,8 +1455,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
        char *data1 = (*bh)->b_data, *data2;
        unsigned split, move, size;
        struct ext4_dir_entry_2 *de = NULL, *de2;
+       struct ext4_dir_entry_tail *t;
+       int     csum_size = 0;
        int     err = 0, i;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        bh2 = ext4_append (handle, dir, &newblock, &err);
        if (!(bh2)) {
                brelse(*bh);
@@ -1204,10 +1509,20 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
        /* Fancy dance to stay within two buffers */
        de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
        de = dx_pack_dirents(data1, blocksize);
-       de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
                                           blocksize);
-       de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2,
+       de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
+                                           (char *) de2,
                                            blocksize);
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(data2, blocksize);
+               initialize_dirent_tail(t, blocksize);
+
+               t = EXT4_DIRENT_TAIL(data1, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
        dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
 
@@ -1218,10 +1533,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                de = de2;
        }
        dx_insert_block(frame, hash2 + continued, newblock);
-       err = ext4_handle_dirty_metadata(handle, dir, bh2);
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
        if (err)
                goto journal_error;
-       err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
+       err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
        if (err)
                goto journal_error;
        brelse(bh2);
@@ -1258,11 +1573,16 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        unsigned short  reclen;
        int             nlen, rlen, err;
        char            *top;
+       int             csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        reclen = EXT4_DIR_REC_LEN(namelen);
        if (!de) {
                de = (struct ext4_dir_entry_2 *)bh->b_data;
-               top = bh->b_data + blocksize - reclen;
+               top = bh->b_data + (blocksize - csum_size) - reclen;
                while ((char *) de <= top) {
                        if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
                                return -EIO;
@@ -1295,11 +1615,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
                de = de1;
        }
        de->file_type = EXT4_FT_UNKNOWN;
-       if (inode) {
-               de->inode = cpu_to_le32(inode->i_ino);
-               ext4_set_de_type(dir->i_sb, de, inode->i_mode);
-       } else
-               de->inode = 0;
+       de->inode = cpu_to_le32(inode->i_ino);
+       ext4_set_de_type(dir->i_sb, de, inode->i_mode);
        de->name_len = namelen;
        memcpy(de->name, name, namelen);
        /*
@@ -1318,7 +1635,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        dir->i_version++;
        ext4_mark_inode_dirty(handle, dir);
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_metadata(handle, dir, bh);
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
        if (err)
                ext4_std_error(dir->i_sb, err);
        return 0;
@@ -1339,6 +1656,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        struct dx_frame frames[2], *frame;
        struct dx_entry *entries;
        struct ext4_dir_entry_2 *de, *de2;
+       struct ext4_dir_entry_tail *t;
        char            *data1, *top;
        unsigned        len;
        int             retval;
@@ -1346,6 +1664,11 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        struct dx_hash_info hinfo;
        ext4_lblk_t  block;
        struct fake_dirent *fde;
+       int             csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        blocksize =  dir->i_sb->s_blocksize;
        dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
@@ -1366,7 +1689,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
                brelse(bh);
                return -EIO;
        }
-       len = ((char *) root) + blocksize - (char *) de;
+       len = ((char *) root) + (blocksize - csum_size) - (char *) de;
 
        /* Allocate new block for the 0th block's dirents */
        bh2 = ext4_append(handle, dir, &block, &retval);
@@ -1382,8 +1705,15 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        top = data1 + len;
        while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
                de = de2;
-       de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
                                           blocksize);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(data1, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        /* Initialize the root; the dot dirents already exist */
        de = (struct ext4_dir_entry_2 *) (&root->dotdot);
        de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
@@ -1408,8 +1738,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        frame->bh = bh;
        bh = bh2;
 
-       ext4_handle_dirty_metadata(handle, dir, frame->bh);
-       ext4_handle_dirty_metadata(handle, dir, bh);
+       ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+       ext4_handle_dirty_dirent_node(handle, dir, bh);
 
        de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
        if (!de) {
@@ -1445,11 +1775,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        struct inode *dir = dentry->d_parent->d_inode;
        struct buffer_head *bh;
        struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
        struct super_block *sb;
        int     retval;
        int     dx_fallback=0;
        unsigned blocksize;
        ext4_lblk_t block, blocks;
+       int     csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        sb = dir->i_sb;
        blocksize = sb->s_blocksize;
@@ -1468,6 +1804,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                bh = ext4_bread(handle, dir, block, 0, &retval);
                if(!bh)
                        return retval;
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data))
+                       return -EIO;
+               set_buffer_verified(bh);
                retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
                if (retval != -ENOSPC) {
                        brelse(bh);
@@ -1484,7 +1825,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                return retval;
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de->inode = 0;
-       de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
+       de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
        brelse(bh);
        if (retval == 0)
@@ -1516,6 +1863,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
        if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
                goto cleanup;
 
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+               goto journal_error;
+       set_buffer_verified(bh);
+
        BUFFER_TRACE(bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, bh);
        if (err)
@@ -1583,7 +1935,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        dxtrace(dx_show_index("node", frames[1].entries));
                        dxtrace(dx_show_index("node",
                               ((struct dx_node *) bh2->b_data)->entries));
-                       err = ext4_handle_dirty_metadata(handle, dir, bh2);
+                       err = ext4_handle_dirty_dx_node(handle, dir, bh2);
                        if (err)
                                goto journal_error;
                        brelse (bh2);
@@ -1609,7 +1961,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        if (err)
                                goto journal_error;
                }
-               err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
+               err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
                if (err) {
                        ext4_std_error(inode->i_sb, err);
                        goto cleanup;
@@ -1641,12 +1993,17 @@ static int ext4_delete_entry(handle_t *handle,
 {
        struct ext4_dir_entry_2 *de, *pde;
        unsigned int blocksize = dir->i_sb->s_blocksize;
+       int csum_size = 0;
        int i, err;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        i = 0;
        pde = NULL;
        de = (struct ext4_dir_entry_2 *) bh->b_data;
-       while (i < bh->b_size) {
+       while (i < bh->b_size - csum_size) {
                if (ext4_check_dir_entry(dir, NULL, de, bh, i))
                        return -EIO;
                if (de == de_del)  {
@@ -1667,7 +2024,7 @@ static int ext4_delete_entry(handle_t *handle,
                                de->inode = 0;
                        dir->i_version++;
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-                       err = ext4_handle_dirty_metadata(handle, dir, bh);
+                       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
                        if (unlikely(err)) {
                                ext4_std_error(dir->i_sb, err);
                                return err;
@@ -1809,9 +2166,15 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct inode *inode;
        struct buffer_head *dir_block = NULL;
        struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
        unsigned int blocksize = dir->i_sb->s_blocksize;
+       int csum_size = 0;
        int err, retries = 0;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        if (EXT4_DIR_LINK_MAX(dir))
                return -EMLINK;
 
@@ -1852,16 +2215,24 @@ retry:
        ext4_set_de_type(dir->i_sb, de, S_IFDIR);
        de = ext4_next_entry(de, blocksize);
        de->inode = cpu_to_le32(dir->i_ino);
-       de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
+       de->rec_len = ext4_rec_len_to_disk(blocksize -
+                                          (csum_size + EXT4_DIR_REC_LEN(1)),
                                           blocksize);
        de->name_len = 2;
        strcpy(de->name, "..");
        ext4_set_de_type(dir->i_sb, de, S_IFDIR);
        set_nlink(inode, 2);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_metadata(handle, inode, dir_block);
+       err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
        if (err)
                goto out_clear_inode;
+       set_buffer_verified(dir_block);
        err = ext4_mark_inode_dirty(handle, inode);
        if (!err)
                err = ext4_add_entry(handle, dentry, inode);
@@ -1911,6 +2282,14 @@ static int empty_dir(struct inode *inode)
                                     inode->i_ino);
                return 1;
        }
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(inode,
+                       (struct ext4_dir_entry *)bh->b_data)) {
+               EXT4_ERROR_INODE(inode, "checksum error reading directory "
+                                "lblock 0");
+               return -EIO;
+       }
+       set_buffer_verified(bh);
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de1 = ext4_next_entry(de, sb->s_blocksize);
        if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -1942,6 +2321,14 @@ static int empty_dir(struct inode *inode)
                                offset += sb->s_blocksize;
                                continue;
                        }
+                       if (!buffer_verified(bh) &&
+                           !ext4_dirent_csum_verify(inode,
+                                       (struct ext4_dir_entry *)bh->b_data)) {
+                               EXT4_ERROR_INODE(inode, "checksum error "
+                                                "reading directory lblock 0");
+                               return -EIO;
+                       }
+                       set_buffer_verified(bh);
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
                if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
@@ -2010,7 +2397,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
        /* Insert this inode at the head of the on-disk orphan list... */
        NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
        EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_super_now(handle, sb);
        rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
        if (!err)
                err = rc;
@@ -2083,7 +2470,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                if (err)
                        goto out_brelse;
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+               err = ext4_handle_dirty_super_now(handle, inode->i_sb);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
@@ -2442,6 +2829,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
                if (!dir_bh)
                        goto end_rename;
+               if (!buffer_verified(dir_bh) &&
+                   !ext4_dirent_csum_verify(old_inode,
+                               (struct ext4_dir_entry *)dir_bh->b_data))
+                       goto end_rename;
+               set_buffer_verified(dir_bh);
                if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
                                old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
                        goto end_rename;
@@ -2472,7 +2864,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        ext4_current_time(new_dir);
                ext4_mark_inode_dirty(handle, new_dir);
                BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+               retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
                if (unlikely(retval)) {
                        ext4_std_error(new_dir->i_sb, retval);
                        goto end_rename;
@@ -2526,7 +2918,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
                                                cpu_to_le32(new_dir->i_ino);
                BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh);
+               retval = ext4_handle_dirty_dirent_node(handle, old_inode,
+                                                      dir_bh);
                if (retval) {
                        ext4_std_error(old_dir->i_sb, retval);
                        goto end_rename;
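
Many of the namei.c hunks above reserve room for a struct ext4_dir_entry_tail at the end of every directory leaf block (all the csum_size adjustments) and expect the rec_len chain to stop exactly on that tail, which is what get_dirent_tail() verifies. The sketch below only demonstrates that layout arithmetic with toy structures and names invented for the example; it leaves out the crc32c over the bytes in front of the tail.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy on-disk directory entry header (the name bytes would follow). */
struct toy_dirent {
    uint32_t inode;
    uint16_t rec_len;   /* distance to the next entry */
    uint8_t  name_len;
    uint8_t  file_type;
};

/* Toy tail playing the role of struct ext4_dir_entry_tail (12 bytes). */
struct toy_dirent_tail {
    uint32_t reserved_zero1;
    uint16_t rec_len;       /* == sizeof(struct toy_dirent_tail) */
    uint8_t  reserved_zero2;
    uint8_t  reserved_ft;   /* plays the role of EXT4_FT_DIR_CSUM */
    uint32_t checksum;
};

#define TAIL(block, blocksize) \
    ((struct toy_dirent_tail *)((char *)(block) + (blocksize) - \
                                sizeof(struct toy_dirent_tail)))

int main(void)
{
    unsigned int blocksize = 1024;
    char *block = calloc(1, blocksize);
    struct toy_dirent *de, *d;
    struct toy_dirent_tail *t;
    unsigned int csum_size = sizeof(struct toy_dirent_tail);

    if (!block)
        return 1;
    de = (struct toy_dirent *)block;
    t = TAIL(block, blocksize);

    /* The lone entry spans the block minus the reserved tail,
     * mirroring the "blocksize - csum_size" rec_len adjustments. */
    de->inode = 11;
    de->name_len = 1;
    de->rec_len = (uint16_t)(blocksize - csum_size);

    /* The tail claims exactly its own size, as initialize_dirent_tail()
     * does, so ordinary rec_len walks step over it harmlessly. */
    t->rec_len = (uint16_t)csum_size;
    t->reserved_ft = 0xDE;

    /* Walk the rec_len chain; it must land exactly on the tail. */
    for (d = de; (char *)d < (char *)t && d->rec_len; )
        d = (struct toy_dirent *)((char *)d + d->rec_len);

    printf("walk lands on tail: %s\n",
           (void *)d == (void *)t ? "yes" : "no");
    printf("bytes the checksum would cover: %u\n",
           (unsigned int)((char *)t - block));
    free(block);
    return 0;
}
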
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 59fa0be272516adf6cbbc94384106690bf710c65..7ea6cbb44121952bf0d4f81f914950ab284dba6b 100644
@@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
        if (flex_gd == NULL)
                goto out3;
 
+       if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+               goto out2;
        flex_gd->count = flexbg_size;
 
        flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
@@ -796,7 +798,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        ext4_kvfree(o_group_desc);
 
        le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_super_now(handle, sb);
        if (err)
                ext4_std_error(sb, err);
 
@@ -968,6 +970,8 @@ static void update_backups(struct super_block *sb,
                goto exit_err;
        }
 
+       ext4_superblock_csum_set(sb, (struct ext4_super_block *)data);
+
        while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
                struct buffer_head *bh;
 
@@ -1067,6 +1071,54 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
        return err;
 }
 
+static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
+{
+       struct buffer_head *bh = sb_getblk(sb, block);
+       if (!bh)
+               return NULL;
+
+       if (bitmap_uptodate(bh))
+               return bh;
+
+       lock_buffer(bh);
+       if (bh_submit_read(bh) < 0) {
+               unlock_buffer(bh);
+               brelse(bh);
+               return NULL;
+       }
+       unlock_buffer(bh);
+
+       return bh;
+}
+
+static int ext4_set_bitmap_checksums(struct super_block *sb,
+                                    ext4_group_t group,
+                                    struct ext4_group_desc *gdp,
+                                    struct ext4_new_group_data *group_data)
+{
+       struct buffer_head *bh;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 0;
+
+       bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
+       if (!bh)
+               return -EIO;
+       ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       brelse(bh);
+
+       bh = ext4_get_bitmap(sb, group_data->block_bitmap);
+       if (!bh)
+               return -EIO;
+       ext4_block_bitmap_csum_set(sb, group, gdp, bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       brelse(bh);
+
+       return 0;
+}
+
 /*
  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
  */
@@ -1093,18 +1145,24 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
                 */
                gdb_bh = sbi->s_group_desc[gdb_num];
                /* Update group descriptor block for new group */
-               gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data +
+               gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
                                                 gdb_off * EXT4_DESC_SIZE(sb));
 
                memset(gdp, 0, EXT4_DESC_SIZE(sb));
                ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
                ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+               err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
+               if (err) {
+                       ext4_std_error(sb, err);
+                       break;
+               }
+
                ext4_inode_table_set(sb, gdp, group_data->inode_table);
                ext4_free_group_clusters_set(sb, gdp,
                                             EXT4_B2C(sbi, group_data->free_blocks_count));
                ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
                gdp->bg_flags = cpu_to_le16(*bg_flags);
-               gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+               ext4_group_desc_csum_set(sb, group, gdp);
 
                err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
                if (unlikely(err)) {
@@ -1343,17 +1401,14 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
                           (1 + ext4_bg_num_gdb(sb, group + i) +
                            le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
                group_data[i].free_blocks_count = blocks_per_group - overhead;
-               if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                              EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+               if (ext4_has_group_desc_csum(sb))
                        flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
                                               EXT4_BG_INODE_UNINIT;
                else
                        flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
        }
 
-       if (last_group == n_group &&
-           EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                      EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+       if (last_group == n_group && ext4_has_group_desc_csum(sb))
                /* We need to initialize block bitmap of last group. */
                flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
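
The alloc_flex_gd() hunk above adds an overflow guard before sizing the kmalloc() of the per-flex-group array. A small userspace sketch of that guard pattern, with an invented element type and helper name, might look like the code below; without the comparison against UINT_MAX / sizeof(element), a huge count could wrap the multiplication and yield an undersized buffer.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct group_data {             /* illustrative element type */
    unsigned long block_bitmap;
    unsigned long inode_bitmap;
    unsigned long free_blocks_count;
};

/* Allocate count elements, refusing any count whose byte size would
 * overflow the unsigned multiplication -- the same shape of check the
 * hunk adds in front of its kmalloc(). */
static struct group_data *alloc_groups(unsigned int count)
{
    if (count == 0 || count >= UINT_MAX / sizeof(struct group_data))
        return NULL;
    return malloc((size_t)count * sizeof(struct group_data));
}

int main(void)
{
    struct group_data *ok = alloc_groups(1024);
    struct group_data *bad = alloc_groups(UINT_MAX / 8);

    printf("1024 groups: %s\n", ok ? "allocated" : "rejected");
    printf("huge count:  %s\n", bad ? "allocated" : "rejected");
    free(ok);
    free(bad);
    return 0;
}
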
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 35b5954489eeb88c6c5a29fd76fced5c3472e6f5..eb7aa3e4ef05caf136f24e0565a28e6d1e0a1539 100644
@@ -112,6 +112,48 @@ static struct file_system_type ext3_fs_type = {
 #define IS_EXT3_SB(sb) (0)
 #endif
 
+static int ext4_verify_csum_type(struct super_block *sb,
+                                struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
+}
+
+static __le32 ext4_superblock_csum(struct super_block *sb,
+                                  struct ext4_super_block *es)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int offset = offsetof(struct ext4_super_block, s_checksum);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ~0, (char *)es, offset);
+
+       return cpu_to_le32(csum);
+}
+
+int ext4_superblock_csum_verify(struct super_block *sb,
+                               struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return es->s_checksum == ext4_superblock_csum(sb, es);
+}
+
+void ext4_superblock_csum_set(struct super_block *sb,
+                             struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       es->s_checksum = ext4_superblock_csum(sb, es);
+}
+
 void *ext4_kvmalloc(size_t size, gfp_t flags)
 {
        void *ret;
@@ -497,6 +539,7 @@ void __ext4_error(struct super_block *sb, const char *function,
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
               sb->s_id, function, line, current->comm, &vaf);
        va_end(args);
+       save_error_info(sb, function, line);
 
        ext4_handle_error(sb);
 }
@@ -905,6 +948,8 @@ static void ext4_put_super(struct super_block *sb)
        unlock_super(sb);
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        kfree(sbi);
 }
@@ -1922,43 +1967,69 @@ failed:
        return 0;
 }
 
-__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
-                           struct ext4_group_desc *gdp)
+static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+                                  struct ext4_group_desc *gdp)
 {
+       int offset;
        __u16 crc = 0;
+       __le32 le_group = cpu_to_le32(block_group);
 
-       if (sbi->s_es->s_feature_ro_compat &
-           cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-               int offset = offsetof(struct ext4_group_desc, bg_checksum);
-               __le32 le_group = cpu_to_le32(block_group);
-
-               crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
-               crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
-               crc = crc16(crc, (__u8 *)gdp, offset);
-               offset += sizeof(gdp->bg_checksum); /* skip checksum */
-               /* for checksum of struct ext4_group_desc do the rest...*/
-               if ((sbi->s_es->s_feature_incompat &
-                    cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
-                   offset < le16_to_cpu(sbi->s_es->s_desc_size))
-                       crc = crc16(crc, (__u8 *)gdp + offset,
-                                   le16_to_cpu(sbi->s_es->s_desc_size) -
-                                       offset);
+       if ((sbi->s_es->s_feature_ro_compat &
+            cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
+               /* Use new metadata_csum algorithm */
+               __u16 old_csum;
+               __u32 csum32;
+
+               old_csum = gdp->bg_checksum;
+               gdp->bg_checksum = 0;
+               csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+                                    sizeof(le_group));
+               csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
+                                    sbi->s_desc_size);
+               gdp->bg_checksum = old_csum;
+
+               crc = csum32 & 0xFFFF;
+               goto out;
        }
 
+       /* old crc16 code */
+       offset = offsetof(struct ext4_group_desc, bg_checksum);
+
+       crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+       crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+       crc = crc16(crc, (__u8 *)gdp, offset);
+       offset += sizeof(gdp->bg_checksum); /* skip checksum */
+       /* for checksum of struct ext4_group_desc do the rest...*/
+       if ((sbi->s_es->s_feature_incompat &
+            cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
+           offset < le16_to_cpu(sbi->s_es->s_desc_size))
+               crc = crc16(crc, (__u8 *)gdp + offset,
+                           le16_to_cpu(sbi->s_es->s_desc_size) -
+                               offset);
+
+out:
        return cpu_to_le16(crc);
 }
 
-int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
+int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
                                struct ext4_group_desc *gdp)
 {
-       if ((sbi->s_es->s_feature_ro_compat &
-            cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
-           (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
+       if (ext4_has_group_desc_csum(sb) &&
+           (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb),
+                                                     block_group, gdp)))
                return 0;
 
        return 1;
 }
 
+void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+                             struct ext4_group_desc *gdp)
+{
+       if (!ext4_has_group_desc_csum(sb))
+               return;
+       gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp);
+}
+
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
                                  ext4_group_t *first_not_zeroed)
@@ -2013,7 +2084,7 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                ext4_lock_group(sb, i);
-               if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+               if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Checksum for group %u failed (%u!=%u)",
                                 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
@@ -2417,6 +2488,23 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
        return count;
 }
 
+static ssize_t trigger_test_error(struct ext4_attr *a,
+                                 struct ext4_sb_info *sbi,
+                                 const char *buf, size_t count)
+{
+       int len = count;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (len && buf[len-1] == '\n')
+               len--;
+
+       if (len)
+               ext4_error(sbi->s_sb, "%.*s", len, buf);
+       return count;
+}
+
 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
 static struct ext4_attr ext4_attr_##_name = {                  \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
@@ -2447,6 +2535,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
 
 static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
@@ -2461,6 +2550,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(mb_stream_req),
        ATTR_LIST(mb_group_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
+       ATTR_LIST(trigger_fs_error),
        NULL,
 };
 
@@ -2957,6 +3047,44 @@ static void ext4_destroy_lazyinit_thread(void)
        kthread_stop(ext4_lazyinit_task);
 }
 
+static int set_journal_csum_feature_set(struct super_block *sb)
+{
+       int ret = 1;
+       int compat, incompat;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               /* journal checksum v2 */
+               compat = 0;
+               incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
+       } else {
+               /* journal checksum v1 */
+               compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+               incompat = 0;
+       }
+
+       if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+               ret = jbd2_journal_set_features(sbi->s_journal,
+                               compat, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+                               incompat);
+       } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
+               ret = jbd2_journal_set_features(sbi->s_journal,
+                               compat, 0,
+                               incompat);
+               jbd2_journal_clear_features(sbi->s_journal, 0, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+       } else {
+               jbd2_journal_clear_features(sbi->s_journal,
+                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+                               JBD2_FEATURE_INCOMPAT_CSUM_V2);
+       }
+
+       return ret;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
        char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -2993,6 +3121,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto out_free_orig;
        }
        sb->s_fs_info = sbi;
+       sbi->s_sb = sb;
        sbi->s_mount_opt = 0;
        sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
        sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
@@ -3032,13 +3161,54 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         * Note: s_es must be initialized as soon as possible because
         *       some ext4 macro-instructions depend on its value
         */
-       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext4_super_block *) (bh->b_data + offset);
        sbi->s_es = es;
        sb->s_magic = le16_to_cpu(es->s_magic);
        if (sb->s_magic != EXT4_SUPER_MAGIC)
                goto cantfind_ext4;
        sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
 
+       /* Warn if metadata_csum and gdt_csum are both set. */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+           EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+               ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+                            "redundant flags; please run fsck.");
+
+       /* Check for a known checksum algorithm */
+       if (!ext4_verify_csum_type(sb, es)) {
+               ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+                        "unknown checksum algorithm.");
+               silent = 1;
+               goto cantfind_ext4;
+       }
+
+       /* Load the checksum driver */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+               if (IS_ERR(sbi->s_chksum_driver)) {
+                       ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
+                       ret = PTR_ERR(sbi->s_chksum_driver);
+                       sbi->s_chksum_driver = NULL;
+                       goto failed_mount;
+               }
+       }
+
+       /* Check superblock checksum */
+       if (!ext4_superblock_csum_verify(sb, es)) {
+               ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+                        "invalid superblock checksum.  Run e2fsck?");
+               silent = 1;
+               goto cantfind_ext4;
+       }
+
+       /* Precompute checksum seed for all metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+                                              sizeof(es->s_uuid));
+
        /* Set defaults before we parse the mount options */
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
        set_opt(sb, INIT_INODE_TABLE);
@@ -3200,7 +3370,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                               "Can't read superblock on 2nd try");
                        goto failed_mount;
                }
-               es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
+               es = (struct ext4_super_block *)(bh->b_data + offset);
                sbi->s_es = es;
                if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
                        ext4_msg(sb, KERN_ERR,
@@ -3392,6 +3562,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                          GFP_KERNEL);
        if (sbi->s_group_desc == NULL) {
                ext4_msg(sb, KERN_ERR, "not enough memory");
+               ret = -ENOMEM;
                goto failed_mount;
        }
 
@@ -3449,6 +3620,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        if (err) {
                ext4_msg(sb, KERN_ERR, "insufficient memory");
+               ret = err;
                goto failed_mount3;
        }
 
@@ -3506,26 +3678,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto no_journal;
        }
 
-       if (ext4_blocks_count(es) > 0xffffffffULL &&
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) &&
            !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
                                       JBD2_FEATURE_INCOMPAT_64BIT)) {
                ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
                goto failed_mount_wq;
        }
 
-       if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
-               jbd2_journal_set_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-       } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
-               jbd2_journal_set_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
-               jbd2_journal_clear_features(sbi->s_journal, 0, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-       } else {
-               jbd2_journal_clear_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+       if (!set_journal_csum_feature_set(sb)) {
+               ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
+                        "feature set");
+               goto failed_mount_wq;
        }
 
        /* We have now updated the journal if required, so we can
@@ -3606,7 +3769,8 @@ no_journal:
                goto failed_mount4;
        }
 
-       ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
+       if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+               sb->s_flags |= MS_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@@ -3641,7 +3805,7 @@ no_journal:
        }
 
        ext4_ext_init(sb);
-       err = ext4_mb_init(sb, needs_recovery);
+       err = ext4_mb_init(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
                         err);
@@ -3724,6 +3888,8 @@ failed_mount2:
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
                remove_proc_entry("options", sbi->s_proc);
                remove_proc_entry(sb->s_id, ext4_proc_root);
@@ -3847,7 +4013,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
                goto out_bdev;
        }
 
-       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext4_super_block *) (bh->b_data + offset);
        if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
            !(le32_to_cpu(es->s_feature_incompat) &
              EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -4039,6 +4205,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                                &EXT4_SB(sb)->s_freeinodes_counter));
        sb->s_dirt = 0;
        BUFFER_TRACE(sbh, "marking dirty");
+       ext4_superblock_csum_set(sb, es);
        mark_buffer_dirty(sbh);
        if (sync) {
                error = sync_dirty_buffer(sbh);
@@ -4333,7 +4500,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                struct ext4_group_desc *gdp =
                                        ext4_get_group_desc(sb, g, NULL);
 
-                               if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
+                               if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
                                        ext4_msg(sb, KERN_ERR,
               "ext4_remount: Checksum for group %u failed (%u!=%u)",
                g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
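
The mount-time hunks above load a crc32c shash once and precompute sbi->s_csum_seed as crc32c(~0, s_uuid), so later metadata checksums can simply continue the CRC from that seed over their own bytes instead of re-hashing the UUID every time. A standalone sketch of that chaining, outside the patch: the bitwise crc32c() below is a slow stand-in for the kernel's crc32c driver, and the buffers are dummies.

    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise CRC-32C (Castagnoli), seedable and without a final inversion,
     * mirroring how ext4_chksum() lets callers chain calls by feeding the
     * previous result back in as the seed. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
        }
        return crc;
    }

    int main(void)
    {
        uint8_t uuid[16] = { 0x3a, 0x5b };   /* stands in for es->s_uuid   */
        uint8_t metadata[32] = { 0 };        /* some metadata block        */

        /* Once at mount: seed = crc32c(~0, uuid). */
        uint32_t seed = crc32c(~0u, uuid, sizeof(uuid));

        /* Per object: continue the CRC from the precomputed seed. */
        uint32_t csum = crc32c(seed, metadata, sizeof(metadata));

        printf("seed=%08x csum=%08x\n", seed, csum);
        return 0;
    }
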
index e88748e55c0f246e90ca21c2094303719f83df07..e56c9ed7d6e30d523b7f8e4b638f9190427cf50d 100644 (file)
@@ -122,6 +122,58 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
        NULL
 };
 
+static __le32 ext4_xattr_block_csum(struct inode *inode,
+                                   sector_t block_nr,
+                                   struct ext4_xattr_header *hdr)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum, old;
+
+       old = hdr->h_checksum;
+       hdr->h_checksum = 0;
+       if (le32_to_cpu(hdr->h_refcount) != 1) {
+               block_nr = cpu_to_le64(block_nr);
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+                                  sizeof(block_nr));
+       } else
+               csum = ei->i_csum_seed;
+       csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+                          EXT4_BLOCK_SIZE(inode->i_sb));
+       hdr->h_checksum = old;
+       return cpu_to_le32(csum);
+}
+
+static int ext4_xattr_block_csum_verify(struct inode *inode,
+                                       sector_t block_nr,
+                                       struct ext4_xattr_header *hdr)
+{
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+           (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+               return 0;
+       return 1;
+}
+
+static void ext4_xattr_block_csum_set(struct inode *inode,
+                                     sector_t block_nr,
+                                     struct ext4_xattr_header *hdr)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+}
+
+static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
+                                               struct inode *inode,
+                                               struct buffer_head *bh)
+{
+       ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 static inline const struct xattr_handler *
 ext4_xattr_handler(int name_index)
 {
@@ -156,12 +208,22 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
 }
 
 static inline int
-ext4_xattr_check_block(struct buffer_head *bh)
+ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
+       int error;
+
+       if (buffer_verified(bh))
+               return 0;
+
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                return -EIO;
-       return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+               return -EIO;
+       error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       if (!error)
+               set_buffer_verified(bh);
+       return error;
 }
 
 static inline int
@@ -224,7 +286,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-       if (ext4_xattr_check_block(bh)) {
+       if (ext4_xattr_check_block(inode, bh)) {
 bad_block:
                EXT4_ERROR_INODE(inode, "bad block %llu",
                                 EXT4_I(inode)->i_file_acl);
@@ -369,7 +431,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-       if (ext4_xattr_check_block(bh)) {
+       if (ext4_xattr_check_block(inode, bh)) {
                EXT4_ERROR_INODE(inode, "bad block %llu",
                                 EXT4_I(inode)->i_file_acl);
                error = -EIO;
@@ -492,7 +554,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                if (ce)
                        mb_cache_entry_release(ce);
                unlock_buffer(bh);
-               error = ext4_handle_dirty_metadata(handle, inode, bh);
+               error = ext4_handle_dirty_xattr_block(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, 1);
@@ -662,7 +724,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
                ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
                        atomic_read(&(bs->bh->b_count)),
                        le32_to_cpu(BHDR(bs->bh)->h_refcount));
-               if (ext4_xattr_check_block(bs->bh)) {
+               if (ext4_xattr_check_block(inode, bs->bh)) {
                        EXT4_ERROR_INODE(inode, "bad block %llu",
                                         EXT4_I(inode)->i_file_acl);
                        error = -EIO;
@@ -725,9 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        if (error == -EIO)
                                goto bad_block;
                        if (!error)
-                               error = ext4_handle_dirty_metadata(handle,
-                                                                  inode,
-                                                                  bs->bh);
+                               error = ext4_handle_dirty_xattr_block(handle,
+                                                                     inode,
+                                                                     bs->bh);
                        if (error)
                                goto cleanup;
                        goto inserted;
@@ -796,9 +858,9 @@ inserted:
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                        le32_to_cpu(BHDR(new_bh)->h_refcount));
                                unlock_buffer(new_bh);
-                               error = ext4_handle_dirty_metadata(handle,
-                                                                  inode,
-                                                                  new_bh);
+                               error = ext4_handle_dirty_xattr_block(handle,
+                                                                     inode,
+                                                                     new_bh);
                                if (error)
                                        goto cleanup_dquot;
                        }
@@ -855,8 +917,8 @@ getblk_failed:
                        set_buffer_uptodate(new_bh);
                        unlock_buffer(new_bh);
                        ext4_xattr_cache_insert(new_bh);
-                       error = ext4_handle_dirty_metadata(handle,
-                                                          inode, new_bh);
+                       error = ext4_handle_dirty_xattr_block(handle,
+                                                             inode, new_bh);
                        if (error)
                                goto cleanup;
                }
@@ -1193,7 +1255,7 @@ retry:
                error = -EIO;
                if (!bh)
                        goto cleanup;
-               if (ext4_xattr_check_block(bh)) {
+               if (ext4_xattr_check_block(inode, bh)) {
                        EXT4_ERROR_INODE(inode, "bad block %llu",
                                         EXT4_I(inode)->i_file_acl);
                        error = -EIO;
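
Beyond the checksum itself, ext4_xattr_check_block() above gains a verify-once shortcut: the header, checksum and name-table validation runs only the first time a given buffer is seen, and success is remembered in a per-buffer verified flag. A tiny model of that pattern, outside the patch, with expensive_check() standing in for the real validation:

    #include <stdbool.h>
    #include <stdio.h>

    struct buf {
        bool verified;          /* models buffer_verified()/set_buffer_verified() */
    };

    /* Hypothetical stand-in for the magic + checksum + name-table checks. */
    static int expensive_check(struct buf *b)
    {
        puts("running full validation");
        return 0;               /* 0 = ok, nonzero = corrupt */
    }

    static int check_block(struct buf *b)
    {
        if (b->verified)
            return 0;           /* already validated once, skip the work */
        if (expensive_check(b))
            return -1;
        b->verified = true;
        return 0;
    }

    int main(void)
    {
        struct buf b = { .verified = false };

        check_block(&b);        /* prints once */
        check_block(&b);        /* cached: silent */
        return 0;
    }
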
index 25b7387ff183f880cdb9ccaf2529ca8c0f218a7b..91f31ca7d9af9df24a965c64bb0271c43a4d4b09 100644 (file)
@@ -27,7 +27,9 @@ struct ext4_xattr_header {
        __le32  h_refcount;     /* reference count */
        __le32  h_blocks;       /* number of disk blocks used */
        __le32  h_hash;         /* hash value of all attributes */
-       __u32   h_reserved[4];  /* zero right now */
+       __le32  h_checksum;     /* crc32c(uuid+id+xattrblock) */
+                               /* id = inum if refcount=1, blknum otherwise */
+       __u32   h_reserved[3];  /* zero right now */
 };
 
 struct ext4_xattr_ibody_header {
index aca191bd5f8fa66bcef77c549b647fce81e28b0f..6eaa28c98ad1e9038dd939f2960d31034d8663a3 100644 (file)
@@ -98,8 +98,8 @@ next:
 
        *bh = sb_bread(sb, phys);
        if (*bh == NULL) {
-               fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
-                      (llu)phys);
+               fat_msg_ratelimit(sb, KERN_ERR,
+                       "Directory bread(block %llu) failed", (llu)phys);
                /* skip this block */
                *pos = (iblock + 1) << sb->s_blocksize_bits;
                goto next;
index 66994f316e18d11f052c85b97922cc1407347a70..fc35c5c69136e805b41ffa6102dc1878d68f7a3f 100644 (file)
@@ -82,6 +82,7 @@ struct msdos_sb_info {
        int fatent_shift;
        struct fatent_operations *fatent_ops;
        struct inode *fat_inode;
+       struct inode *fsinfo_inode;
 
        struct ratelimit_state ratelimit;
 
@@ -334,6 +335,11 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
        __fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
 __printf(3, 4) __cold
 void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+#define fat_msg_ratelimit(sb, level, fmt, args...)     \
+       do {    \
+                       if (__ratelimit(&MSDOS_SB(sb)->ratelimit))      \
+                               fat_msg(sb, level, fmt, ## args);       \
+        } while (0)
 extern int fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
 extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
index 2e81ac0df7e2eae4b30ad9b2e1db360798f92b2c..31f08ab62c562d1926a75183c802793642cd390c 100644 (file)
@@ -308,6 +308,16 @@ void fat_ent_access_init(struct super_block *sb)
        }
 }
 
+static void mark_fsinfo_dirty(struct super_block *sb)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+       if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
+               return;
+
+       __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
+}
+
 static inline int fat_ent_update_ptr(struct super_block *sb,
                                     struct fat_entry *fatent,
                                     int offset, sector_t blocknr)
@@ -498,7 +508,6 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
                                sbi->prev_free = entry;
                                if (sbi->free_clusters != -1)
                                        sbi->free_clusters--;
-                               sb->s_dirt = 1;
 
                                cluster[idx_clus] = entry;
                                idx_clus++;
@@ -520,11 +529,11 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
        /* Couldn't allocate the free entries */
        sbi->free_clusters = 0;
        sbi->free_clus_valid = 1;
-       sb->s_dirt = 1;
        err = -ENOSPC;
 
 out:
        unlock_fat(sbi);
+       mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
        if (!err) {
                if (inode_needs_sync(inode))
@@ -549,7 +558,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
        struct fat_entry fatent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, err, nr_bhs;
-       int first_cl = cluster;
+       int first_cl = cluster, dirty_fsinfo = 0;
 
        nr_bhs = 0;
        fatent_init(&fatent);
@@ -587,7 +596,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                ops->ent_put(&fatent, FAT_ENT_FREE);
                if (sbi->free_clusters != -1) {
                        sbi->free_clusters++;
-                       sb->s_dirt = 1;
+                       dirty_fsinfo = 1;
                }
 
                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
@@ -617,6 +626,8 @@ error:
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);
        unlock_fat(sbi);
+       if (dirty_fsinfo)
+               mark_fsinfo_dirty(sb);
 
        return err;
 }
@@ -677,7 +688,7 @@ int fat_count_free_clusters(struct super_block *sb)
        }
        sbi->free_clusters = free;
        sbi->free_clus_valid = 1;
-       sb->s_dirt = 1;
+       mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
 out:
        unlock_fat(sbi);
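
These fatent.c hunks stop touching sb->s_dirt and instead mark a dedicated in-core FSINFO inode dirty, so the ordinary inode-writeback path ends up flushing the FAT32 free-cluster summary (the inode.c hunks below route MSDOS_FSINFO_INO to fat_clusters_flush() and drop write_super/sync_fs entirely). A rough, non-kernel model of piggybacking one special object on a generic dirty-object sweep:

    #include <stdbool.h>
    #include <stdio.h>

    #define FSINFO_ID 1   /* models MSDOS_FSINFO_INO: a reserved, in-core-only inode */

    struct obj { int id; bool dirty; };

    /* Models fat_write_inode(): the special fsinfo object is flushed differently. */
    static void write_obj(struct obj *o)
    {
        if (o->id == FSINFO_ID)
            puts("flush FSINFO sector (free cluster count)");
        else
            printf("write inode %d\n", o->id);
        o->dirty = false;
    }

    int main(void)
    {
        struct obj objs[] = { { FSINFO_ID, false }, { 42, false } };

        /* Models mark_fsinfo_dirty(): instead of sb->s_dirt = 1, dirty the
         * object and let the ordinary writeback pass pick it up. */
        objs[0].dirty = true;

        for (int i = 0; i < 2; i++)
            if (objs[i].dirty)
                write_obj(&objs[i]);
        return 0;
    }
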
index b3d290c1b51392ac949bdf8e00514e29e29bd2e9..0038b32cb36276d537f2ec81a46469bf3ad221b1 100644 (file)
@@ -459,37 +459,11 @@ static void fat_evict_inode(struct inode *inode)
        fat_detach(inode);
 }
 
-static void fat_write_super(struct super_block *sb)
-{
-       lock_super(sb);
-       sb->s_dirt = 0;
-
-       if (!(sb->s_flags & MS_RDONLY))
-               fat_clusters_flush(sb);
-       unlock_super(sb);
-}
-
-static int fat_sync_fs(struct super_block *sb, int wait)
-{
-       int err = 0;
-
-       if (sb->s_dirt) {
-               lock_super(sb);
-               sb->s_dirt = 0;
-               err = fat_clusters_flush(sb);
-               unlock_super(sb);
-       }
-
-       return err;
-}
-
 static void fat_put_super(struct super_block *sb)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
 
-       if (sb->s_dirt)
-               fat_write_super(sb);
-
+       iput(sbi->fsinfo_inode);
        iput(sbi->fat_inode);
 
        unload_nls(sbi->nls_disk);
@@ -661,7 +635,18 @@ retry:
 
 static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+       int err;
+
+       if (inode->i_ino == MSDOS_FSINFO_INO) {
+               struct super_block *sb = inode->i_sb;
+
+               lock_super(sb);
+               err = fat_clusters_flush(sb);
+               unlock_super(sb);
+       } else
+               err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+
+       return err;
 }
 
 int fat_sync_inode(struct inode *inode)
@@ -678,8 +663,6 @@ static const struct super_operations fat_sops = {
        .write_inode    = fat_write_inode,
        .evict_inode    = fat_evict_inode,
        .put_super      = fat_put_super,
-       .write_super    = fat_write_super,
-       .sync_fs        = fat_sync_fs,
        .statfs         = fat_statfs,
        .remount_fs     = fat_remount,
 
@@ -752,28 +735,26 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
 }
 
 static int
-fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
+fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
        int len = *lenp;
-       struct inode *inode =  de->d_inode;
-       u32 ipos_h, ipos_m, ipos_l;
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+       loff_t i_pos;
 
        if (len < 5) {
                *lenp = 5;
                return 255; /* no room */
        }
 
-       ipos_h = MSDOS_I(inode)->i_pos >> 8;
-       ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-       ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+       i_pos = fat_i_pos_read(sbi, inode);
        *lenp = 5;
        fh[0] = inode->i_ino;
        fh[1] = inode->i_generation;
-       fh[2] = ipos_h;
-       fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-       spin_lock(&de->d_lock);
-       fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
-       spin_unlock(&de->d_lock);
+       fh[2] = i_pos >> 8;
+       fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+       fh[4] = (i_pos & 0x0f) << 28;
+       if (parent)
+               fh[4] |= MSDOS_I(parent)->i_logstart;
        return 3;
 }
 
@@ -1244,6 +1225,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                   void (*setup)(struct super_block *))
 {
        struct inode *root_inode = NULL, *fat_inode = NULL;
+       struct inode *fsinfo_inode = NULL;
        struct buffer_head *bh;
        struct fat_boot_sector *b;
        struct msdos_sb_info *sbi;
@@ -1490,6 +1472,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                goto out_fail;
        MSDOS_I(fat_inode)->i_pos = 0;
        sbi->fat_inode = fat_inode;
+
+       fsinfo_inode = new_inode(sb);
+       if (!fsinfo_inode)
+               goto out_fail;
+       fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+       sbi->fsinfo_inode = fsinfo_inode;
+       insert_inode_hash(fsinfo_inode);
+
        root_inode = new_inode(sb);
        if (!root_inode)
                goto out_fail;
@@ -1516,6 +1506,8 @@ out_invalid:
                fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
 
 out_fail:
+       if (fsinfo_inode)
+               iput(fsinfo_inode);
        if (fat_inode)
                iput(fat_inode);
        unload_nls(sbi->nls_io);
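
The reworked fat_encode_fh() above builds the handle from i_pos (read through fat_i_pos_read()) and the parent inode handed in by the caller, rather than poking at the dentry under d_lock, and it spreads the up-to-40-bit i_pos across fh[2]..fh[4] alongside the two start clusters. A standalone round-trip of that packing; the decode side is only a guess at how fat_fh_to_dentry() would recover i_pos, it is not part of this patch.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void encode(uint64_t i_pos, uint32_t logstart, uint32_t parent_logstart,
                       uint32_t fh[5])
    {
        fh[0] = 0;                              /* i_ino (not modelled here)    */
        fh[1] = 0;                              /* i_generation (not modelled)  */
        fh[2] = i_pos >> 8;
        fh[3] = ((i_pos & 0xf0) << 24) | logstart;
        fh[4] = ((i_pos & 0x0f) << 28) | parent_logstart;
    }

    /* Assumed inverse: bits 39..8 from fh[2], bits 7..4 from the top nibble of
     * fh[3], bits 3..0 from the top nibble of fh[4]. */
    static uint64_t decode_i_pos(const uint32_t fh[5])
    {
        return ((uint64_t)fh[2] << 8) |
               ((fh[3] >> 24) & 0xf0) |
               (fh[4] >> 28);
    }

    int main(void)
    {
        uint32_t fh[5];
        uint64_t i_pos = 0x123456789AULL;       /* any 40-bit position */

        encode(i_pos, 0x0ABCDEF, 0x0123456, fh);
        assert(decode_i_pos(fh) == i_pos);
        printf("i_pos round-trips: %llx\n", (unsigned long long)decode_i_pos(fh));
        return 0;
    }
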
index d078b75572a75eb9117092ee5bb752c84e1b38b8..81b70e665bf000412f73aa300890a53823db36f0 100644 (file)
@@ -442,28 +442,24 @@ static int check_fcntl_cmd(unsigned cmd)
 SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {      
        struct file *filp;
+       int fput_needed;
        long err = -EBADF;
 
-       filp = fget_raw(fd);
+       filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
        if (unlikely(filp->f_mode & FMODE_PATH)) {
-               if (!check_fcntl_cmd(cmd)) {
-                       fput(filp);
-                       goto out;
-               }
+               if (!check_fcntl_cmd(cmd))
+                       goto out1;
        }
 
        err = security_file_fcntl(filp, cmd, arg);
-       if (err) {
-               fput(filp);
-               return err;
-       }
+       if (!err)
+               err = do_fcntl(fd, cmd, arg, filp);
 
-       err = do_fcntl(fd, cmd, arg, filp);
-
-       fput(filp);
+out1:
+       fput_light(filp, fput_needed);
 out:
        return err;
 }
@@ -473,26 +469,21 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
 {      
        struct file * filp;
-       long err;
+       long err = -EBADF;
+       int fput_needed;
 
-       err = -EBADF;
-       filp = fget_raw(fd);
+       filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
        if (unlikely(filp->f_mode & FMODE_PATH)) {
-               if (!check_fcntl_cmd(cmd)) {
-                       fput(filp);
-                       goto out;
-               }
+               if (!check_fcntl_cmd(cmd))
+                       goto out1;
        }
 
        err = security_file_fcntl(filp, cmd, arg);
-       if (err) {
-               fput(filp);
-               return err;
-       }
-       err = -EBADF;
+       if (err)
+               goto out1;
        
        switch (cmd) {
                case F_GETLK64:
@@ -507,7 +498,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                        err = do_fcntl(fd, cmd, arg, filp);
                        break;
        }
-       fput(filp);
+out1:
+       fput_light(filp, fput_needed);
 out:
        return err;
 }
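
Both fcntl paths above move from fget_raw()/fput() to the light variants and funnel every exit through a single fput_light(). The point of the light pair is that the get side may skip the reference-count bump when the task's file table is not shared, reporting through fput_needed whether the put side has anything to drop. A simplified userspace model of that contract (the sharing test is reduced to a flag):

    #include <stdio.h>

    struct file { int refcount; };

    /* Models fget_raw_light(): take a reference only when the table is shared,
     * and tell the caller whether a later fput is needed. */
    static struct file *fget_light(struct file *f, int table_shared, int *fput_needed)
    {
        if (table_shared) {
            f->refcount++;
            *fput_needed = 1;
        } else {
            *fput_needed = 0;   /* the caller's fd table already pins the file */
        }
        return f;
    }

    static void fput_light(struct file *f, int fput_needed)
    {
        if (fput_needed)
            f->refcount--;
    }

    int main(void)
    {
        struct file f = { .refcount = 1 };
        int fput_needed;

        struct file *p = fget_light(&f, 0 /* not shared */, &fput_needed);
        /* ... do the fcntl work on p ... */
        fput_light(p, fput_needed);     /* no atomic op was ever needed */

        printf("refcount=%d\n", f.refcount);
        return 0;
    }
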
index b1a524d798e720cf18ad7ad4decdab430a2b9c5e..cf6f4345ceb0125baf86b7a029e7fdf8712f76c3 100644 (file)
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pipe_fs_i.h>
 
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
 {
        int cur = *cnt; 
 
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
                if (signal_pending(current))
                        break;
        }
+       return cur == *cnt ? -ERESTARTSYS : 0;
 }
 
 static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
-                               wait_for_partner(inode, &pipe->w_counter);
-                               if(signal_pending(current))
+                               if (wait_for_partner(inode, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                        wake_up_partner(inode);
 
                if (!pipe->readers) {
-                       wait_for_partner(inode, &pipe->r_counter);
-                       if (signal_pending(current))
+                       if (wait_for_partner(inode, &pipe->r_counter))
                                goto err_wr;
                }
                break;
index 70f2a0fd6aec62b28724d46e356dc0ff871f88b8..a305d9e2d1b2aac05dcd456bdd23885652272439 100644 (file)
@@ -34,7 +34,6 @@ struct files_stat_struct files_stat = {
        .max_files = NR_FILE
 };
 
-DECLARE_LGLOCK(files_lglock);
 DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
@@ -421,9 +420,9 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
  */
 void file_sb_list_add(struct file *file, struct super_block *sb)
 {
-       lg_local_lock(files_lglock);
+       lg_local_lock(&files_lglock);
        __file_sb_list_add(file, sb);
-       lg_local_unlock(files_lglock);
+       lg_local_unlock(&files_lglock);
 }
 
 /**
@@ -436,9 +435,9 @@ void file_sb_list_add(struct file *file, struct super_block *sb)
 void file_sb_list_del(struct file *file)
 {
        if (!list_empty(&file->f_u.fu_list)) {
-               lg_local_lock_cpu(files_lglock, file_list_cpu(file));
+               lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
-               lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
+               lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
        }
 }
 
@@ -485,7 +484,7 @@ void mark_files_ro(struct super_block *sb)
        struct file *f;
 
 retry:
-       lg_global_lock(files_lglock);
+       lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
@@ -502,12 +501,12 @@ retry:
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                /* This can sleep, so we can't hold the spinlock. */
-               lg_global_unlock(files_lglock);
+               lg_global_unlock(&files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        } while_file_list_for_each_entry;
-       lg_global_unlock(files_lglock);
+       lg_global_unlock(&files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -525,6 +524,6 @@ void __init files_init(unsigned long mempages)
        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
-       lg_lock_init(files_lglock);
+       lg_lock_init(&files_lglock, "files_lglock");
        percpu_counter_init(&nr_files, 0);
 } 
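
file_table.c here is just adapting to the reworked lglock interface: the lock is now an object passed by address, lg_lock_init() takes an explicit name, and the separate DECLARE_LGLOCK() is gone. As a reminder of what such a lock buys, a toy pthread model of the local/global asymmetry follows; it is purely illustrative and not the kernel implementation.

    #include <pthread.h>

    #define NCPU 4

    struct lglock { pthread_mutex_t cpu[NCPU]; };

    static void lg_lock_init(struct lglock *lg)
    {
        for (int i = 0; i < NCPU; i++)
            pthread_mutex_init(&lg->cpu[i], NULL);
    }

    /* Cheap path: a writer of per-CPU data takes only its own lock. */
    static void lg_local_lock(struct lglock *lg, int cpu)
    {
        pthread_mutex_lock(&lg->cpu[cpu]);
    }

    static void lg_local_unlock(struct lglock *lg, int cpu)
    {
        pthread_mutex_unlock(&lg->cpu[cpu]);
    }

    /* Expensive path: a full traversal takes every per-CPU lock. */
    static void lg_global_lock(struct lglock *lg)
    {
        for (int i = 0; i < NCPU; i++)
            pthread_mutex_lock(&lg->cpu[i]);
    }

    static void lg_global_unlock(struct lglock *lg)
    {
        for (int i = NCPU - 1; i >= 0; i--)
            pthread_mutex_unlock(&lg->cpu[i]);
    }

    int main(void)
    {
        struct lglock files_lock;

        lg_lock_init(&files_lock);
        lg_local_lock(&files_lock, 0);      /* e.g. file_sb_list_add()            */
        lg_local_unlock(&files_lock, 0);
        lg_global_lock(&files_lock);        /* e.g. mark_files_ro() walking lists */
        lg_global_unlock(&files_lock);
        return 0;
    }
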
index 8d2fb8c88cf36a196c47f473bcc729510ad89d8e..41a3ccff18d87bdfc3f65148e2f39f8067f3a8ce 100644 (file)
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                        /* Wait for I_SYNC. This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        /* Inode may be gone, start again */
+                       spin_lock(&wb->list_lock);
                        continue;
                }
                inode->i_state |= I_SYNC;
index 42593c587d48509604f3b9cfd8c0f9ffefe9ba0e..03ff5b1eba93ec21e11d9ca31f9c8b3f22e65d36 100644 (file)
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
                                     unsigned global_limit)
 {
        unsigned long t;
-       char tmp[32];
        unsigned limit = (1 << 16) - 1;
        int err;
 
-       if (*ppos || count >= sizeof(tmp) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(tmp, buf, count))
+       if (*ppos)
                return -EINVAL;
 
-       tmp[count] = '\0';
-
-       err = strict_strtoul(tmp, 0, &t);
+       err = kstrtoul_from_user(buf, count, 0, &t);
        if (err)
                return err;
 
index df5ac048dc74e6b33174a69dc36b3a5b6084fd74..334e0b18a014c72bf78583f76a6c3b02f9058181 100644 (file)
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
                          struct kstat *stat)
 {
+       unsigned int blkbits;
+
        stat->dev = inode->i_sb->s_dev;
        stat->ino = attr->ino;
        stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
        stat->ctime.tv_nsec = attr->ctimensec;
        stat->size = attr->size;
        stat->blocks = attr->blocks;
-       stat->blksize = (1 << inode->i_blkbits);
+
+       if (attr->blksize != 0)
+               blkbits = ilog2(attr->blksize);
+       else
+               blkbits = inode->i_sb->s_blocksize_bits;
+
+       stat->blksize = 1 << blkbits;
 }
 
 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
                if (stat) {
                        generic_fillattr(inode, stat);
                        stat->mode = fi->orig_i_mode;
+                       stat->ino = fi->orig_ino;
                }
        }
 
index 504e61b7fd7515f8aafe7e3b9edd2c9fa42fd91d..b321a688cde79aa0f2923f3440330d03f0e5ee1e 100644 (file)
@@ -962,7 +962,9 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
 
        if (file->f_flags & O_DIRECT) {
                written = generic_file_direct_write(iocb, iov, &nr_segs,
@@ -2171,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
+long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                           loff_t length)
+{
+       struct fuse_file *ff = file->private_data;
+       struct fuse_conn *fc = ff->fc;
+       struct fuse_req *req;
+       struct fuse_fallocate_in inarg = {
+               .fh = ff->fh,
+               .offset = offset,
+               .length = length,
+               .mode = mode
+       };
+       int err;
+
+       if (fc->no_fallocate)
+               return -EOPNOTSUPP;
+
+       req = fuse_get_req(fc);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->in.h.opcode = FUSE_FALLOCATE;
+       req->in.h.nodeid = ff->nodeid;
+       req->in.numargs = 1;
+       req->in.args[0].size = sizeof(inarg);
+       req->in.args[0].value = &inarg;
+       fuse_request_send(fc, req);
+       err = req->out.h.error;
+       if (err == -ENOSYS) {
+               fc->no_fallocate = 1;
+               err = -EOPNOTSUPP;
+       }
+       fuse_put_request(fc, req);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(fuse_file_fallocate);
+
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
@@ -2188,6 +2228,7 @@ static const struct file_operations fuse_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
@@ -2204,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
        /* no splice_read */
 };
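
fuse_file_fallocate() above follows the usual FUSE pattern for optional operations: issue the request once and, if the server answers -ENOSYS, latch fc->no_fallocate so later calls return -EOPNOTSUPP locally without another round trip. A compact model of that probe-once idiom; send_request() is a hypothetical stand-in for the FUSE transport.

    #include <errno.h>
    #include <stdio.h>

    struct conn { unsigned no_fallocate:1; };

    /* Hypothetical transport: pretend the server lacks the opcode. */
    static int send_request(int opcode) { (void)opcode; return -ENOSYS; }

    static int do_fallocate(struct conn *fc)
    {
        int err;

        if (fc->no_fallocate)
            return -EOPNOTSUPP;             /* fast path after the first probe */

        err = send_request(43 /* some FALLOCATE opcode */);
        if (err == -ENOSYS) {
            fc->no_fallocate = 1;           /* never ask again on this connection */
            err = -EOPNOTSUPP;
        }
        return err;
    }

    int main(void)
    {
        struct conn fc = { 0 };

        printf("%d\n", do_fallocate(&fc));  /* probes the server */
        printf("%d\n", do_fallocate(&fc));  /* answered locally  */
        return 0;
    }
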
 
index 572cefc7801296ce1b15cba6223f09d9a9826072..771fb6322c0750d223bb2ee7873f6d0cdbdef41b 100644 (file)
@@ -82,6 +82,9 @@ struct fuse_inode {
            preserve the original mode */
        umode_t orig_i_mode;
 
+       /** 64 bit inode number */
+       u64 orig_ino;
+
        /** Version of last attribute change */
        u64 attr_version;
 
@@ -478,6 +481,9 @@ struct fuse_conn {
        /** Are BSD file locking primitives not implemented by fs? */
        unsigned no_flock:1;
 
+       /** Is fallocate not implemented by fs? */
+       unsigned no_fallocate:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index 56f6dcf307684287bad491b8711fa3a4ef4f0633..1cd61652018c7c8547a060344ef998ee6ae96879 100644 (file)
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->writectr = 0;
+       fi->orig_ino = 0;
        INIT_LIST_HEAD(&fi->write_files);
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static ino_t fuse_squash_ino(u64 ino64)
+{
+       ino_t ino = (ino_t) ino64;
+       if (sizeof(ino_t) < sizeof(u64))
+               ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
+       return ino;
+}
+
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   u64 attr_valid)
 {
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;
 
-       inode->i_ino     = attr->ino;
+       inode->i_ino     = fuse_squash_ino(attr->ino);
        inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        set_nlink(inode, attr->nlink);
        inode->i_uid     = attr->uid;
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
+
+       fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
@@ -627,12 +642,10 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
        return ERR_PTR(err);
 }
 
-static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-                          int connectable)
+static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+                          struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
-       bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
-       int len = encode_parent ? 6 : 3;
+       int len = parent ? 6 : 3;
        u64 nodeid;
        u32 generation;
 
@@ -648,14 +661,9 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        fh[1] = (u32)(nodeid & 0xffffffff);
        fh[2] = generation;
 
-       if (encode_parent) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                nodeid = get_fuse_inode(parent)->nodeid;
                generation = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
 
                fh[3] = (u32)(nodeid >> 32);
                fh[4] = (u32)(nodeid & 0xffffffff);
@@ -663,7 +671,7 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        }
 
        *max_len = len;
-       return encode_parent ? 0x82 : 0x81;
+       return parent ? 0x82 : 0x81;
 }
 
 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
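
fuse_squash_ino() above folds a 64-bit server inode number into a 32-bit ino_t by XOR-ing the high word into the low word (on 64-bit hosts the cast already fits and the branch disappears), while fi->orig_ino keeps the real value so getattr can still report it. A quick standalone check of the fold:

    #include <stdint.h>
    #include <stdio.h>

    /* Same fold as fuse_squash_ino(), written for an explicit 32-bit ino_t. */
    static uint32_t squash_ino(uint64_t ino64)
    {
        return (uint32_t)ino64 ^ (uint32_t)(ino64 >> 32);
    }

    int main(void)
    {
        uint64_t ino64 = 0x123456789abcdef0ULL;

        /* 0x9abcdef0 ^ 0x12345678 == 0x88888888 */
        printf("%llx -> %x\n", (unsigned long long)ino64, squash_ino(ino64));
        return 0;
    }
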
index 70ba891654f8ce3582c456e208feda6d56e90a1a..e8ed6d4a6181132ff47960dc118cd6fb60c1b81c 100644 (file)
 #define GFS2_LARGE_FH_SIZE 8
 #define GFS2_OLD_FH_SIZE 10
 
-static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
-                         int connectable)
+static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
+                         struct inode *parent)
 {
        __be32 *fh = (__force __be32 *)p;
-       struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct gfs2_inode *ip = GFS2_I(inode);
 
-       if (connectable && (*len < GFS2_LARGE_FH_SIZE)) {
+       if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
                *len = GFS2_LARGE_FH_SIZE;
                return 255;
        } else if (*len < GFS2_SMALL_FH_SIZE) {
@@ -50,14 +49,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
        fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
        *len = GFS2_SMALL_FH_SIZE;
 
-       if (!connectable || inode == sb->s_root->d_inode)
+       if (!parent || inode == sb->s_root->d_inode)
                return *len;
 
-       spin_lock(&dentry->d_lock);
-       inode = dentry->d_parent->d_inode;
-       ip = GFS2_I(inode);
-       igrab(inode);
-       spin_unlock(&dentry->d_lock);
+       ip = GFS2_I(parent);
 
        fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
        fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
@@ -65,8 +60,6 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
        fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
        *len = GFS2_LARGE_FH_SIZE;
 
-       iput(inode);
-
        return *len;
 }
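
Like the fat and fuse hunks earlier, gfs2 is converted to the new ->encode_fh() prototype: it now receives the inode plus, when a connectable handle was asked for, a parent inode resolved by the caller, so the filesystem no longer dereferences the dentry under d_lock itself. A minimal, non-gfs2 sketch of an implementation under that contract, keeping only the conventions visible in these hunks (return a fileid type, or 255 after raising *max_len when the buffer is too small); the type codes and layout below are made up.

    #include <stdint.h>
    #include <stdio.h>

    struct inode { uint64_t ino; uint32_t generation; };

    #define FILEID_PLAIN   1    /* made-up type codes, not the kernel's */
    #define FILEID_PARENT  2

    /* New-style encode_fh: no dentry, no 'connectable' flag; a non-NULL parent
     * means "make the handle connectable". */
    static int encode_fh(struct inode *inode, uint32_t *fh, int *max_len,
                         struct inode *parent)
    {
        int len = parent ? 6 : 3;

        if (*max_len < len) {
            *max_len = len;
            return 255;                     /* no room */
        }
        fh[0] = (uint32_t)(inode->ino >> 32);
        fh[1] = (uint32_t)inode->ino;
        fh[2] = inode->generation;
        if (parent) {
            fh[3] = (uint32_t)(parent->ino >> 32);
            fh[4] = (uint32_t)parent->ino;
            fh[5] = parent->generation;
        }
        *max_len = len;
        return parent ? FILEID_PARENT : FILEID_PLAIN;
    }

    int main(void)
    {
        struct inode file = { 0x100000002ULL, 7 }, dir = { 0x100000001ULL, 3 };
        uint32_t fh[6];
        int max_len = 6;
        int type = encode_fh(&file, fh, &max_len, &dir);

        printf("type=%d len=%d\n", type, max_len);
        return 0;
    }
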
 
index c640ba57074b8ba58c46c148815c714f20b48939..09addc8615fa6603e28d2fa718e5a32c1762acbd 100644 (file)
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        struct hfsplus_vh *vh = sbi->s_vhdr;
        struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
+       u32 cnid = (unsigned long)dentry->d_fsdata;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        vh->finder_info[0] = bvh->finder_info[0] =
                cpu_to_be32(parent_ino(dentry));
 
-       /* Bootloader */
-       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino);
+       /*
+        * Bootloader. Just using the inode here breaks in the case of
+        * hard links - the firmware wants the ID of the hard link file,
+        * but the inode points at the indirect inode
+        */
+       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
 
        /* Per spec, the OS X system folder - same as finder_info[0] here */
        vh->finder_info[5] = bvh->finder_info[5] =
index 7daf4b852d1c78ca89c31d791e9eddeb9dcf1a0d..90effcccca9af4c13f6b44b7b3fe756612f9d735 100644 (file)
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret = 0;
-       unsigned int io_size;
+       u64 io_size;
        loff_t start;
        int offset;
 
index 7a5eb2c718c854206d6db419abe4ce7bc61d12c7..cdb84a8380682b5f341138cb6f75e2754434e073 100644 (file)
@@ -16,9 +16,9 @@
 static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
-       if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
+       if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
                hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
                goto fail1;
        }
@@ -62,7 +62,7 @@ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
 static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
 {
        struct quad_buffer_head qbh;
-       unsigned *bmp;
+       __le32 *bmp;
        unsigned bs = near & ~0x3fff;
        unsigned nr = (near & 0x3fff) & ~(n - 1);
        /*unsigned mnr;*/
@@ -236,7 +236,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
 int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
        if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
                bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
@@ -254,7 +254,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        /*printk("2 - ");*/
        if (!n) return;
@@ -299,7 +299,7 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
        int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
        int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
        int i, j;
-       u32 *bmp;
+       __le32 *bmp;
        struct quad_buffer_head qbh;
        if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                for (j = 0; j < 512; j++) {
@@ -351,7 +351,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
                hpfs_free_sectors(s, dno, 4);
        } else {
                struct quad_buffer_head qbh;
-               u32 *bmp;
+               __le32 *bmp;
                unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                        return;
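
The alloc.c changes above are mostly endianness-annotation fixes: the on-disk bitmaps are arrays of little-endian __le32 words, so they are declared as such and read with le32_to_cpu(), as the corrected chk_if_allocated() now does. The index arithmetic those tests share is worth spelling out: each bitmap covers 0x4000 sectors, the word is (sec & 0x3fff) >> 5 and the bit within it is sec & 0x1f. A small standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* One HPFS bitmap maps 0x4000 sectors onto 512 32-bit words. */
    static int bit_is_set(const uint32_t *bmp /* already le32_to_cpu()-converted */,
                          unsigned sec)
    {
        unsigned word = (sec & 0x3fff) >> 5;    /* which of the 512 words     */
        unsigned bit  = sec & 0x1f;             /* which bit inside that word */

        return (bmp[word] >> bit) & 1;
    }

    int main(void)
    {
        uint32_t bmp[512] = { 0 };
        unsigned sec = 0x4321;                  /* sector 0x0321 of bitmap 1 */

        bmp[(sec & 0x3fff) >> 5] |= 1u << (sec & 0x1f);
        printf("word=%u bit=%u set=%d\n",
               (sec & 0x3fff) >> 5, sec & 0x1f, bit_is_set(bmp, sec));
        return 0;
    }
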
index 08b503e8ed29ec610a098cb9658e1a2ecaa1779c..4bae4a4a60b1936eba70d17d18e7d4a016ed54b9 100644 (file)
@@ -20,7 +20,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
        int c1, c2 = 0;
        go_down:
        if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
-       if (btree->internal) {
+       if (bp_internal(btree)) {
                for (i = 0; i < btree->n_used_nodes; i++)
                        if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
                                a = le32_to_cpu(btree->u.internal[i].down);
@@ -82,7 +82,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                brelse(bh);
                return -1;
        }
-       if (btree->internal) {
+       if (bp_internal(btree)) {
                a = le32_to_cpu(btree->u.internal[n].down);
                btree->u.internal[n].file_secno = cpu_to_le32(-1);
                mark_buffer_dirty(bh);
@@ -129,12 +129,12 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                }
                if (a == node && fnod) {
                        anode->up = cpu_to_le32(node);
-                       anode->btree.fnode_parent = 1;
+                       anode->btree.flags |= BP_fnode_parent;
                        anode->btree.n_used_nodes = btree->n_used_nodes;
                        anode->btree.first_free = btree->first_free;
                        anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
                        memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
-                       btree->internal = 1;
+                       btree->flags |= BP_internal;
                        btree->n_free_nodes = 11;
                        btree->n_used_nodes = 1;
                        btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
@@ -184,7 +184,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                        hpfs_free_sectors(s, ra, 1);
                        if ((anode = hpfs_map_anode(s, na, &bh))) {
                                anode->up = cpu_to_le32(up);
-                               anode->btree.fnode_parent = up == node && fnod;
+                               if (up == node && fnod)
+                                       anode->btree.flags |= BP_fnode_parent;
+                               else
+                                       anode->btree.flags &= ~BP_fnode_parent;
                                mark_buffer_dirty(bh);
                                brelse(bh);
                        }
@@ -198,7 +201,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
                        anode = new_anode;
                        /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
-                       anode->btree.internal = 1;
+                       anode->btree.flags |= BP_internal;
                        anode->btree.n_used_nodes = 1;
                        anode->btree.n_free_nodes = 59;
                        anode->btree.first_free = cpu_to_le16(16);
@@ -215,7 +218,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
        }
        if ((anode = hpfs_map_anode(s, na, &bh))) {
                anode->up = cpu_to_le32(node);
-               if (fnod) anode->btree.fnode_parent = 1;
+               if (fnod)
+                       anode->btree.flags |= BP_fnode_parent;
                mark_buffer_dirty(bh);
                brelse(bh);
        }
@@ -234,18 +238,19 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
        }
        ranode->up = cpu_to_le32(node);
        memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
-       if (fnod) ranode->btree.fnode_parent = 1;
-       ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
-       if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+       if (fnod)
+               ranode->btree.flags |= BP_fnode_parent;
+       ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
+       if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
                struct anode *unode;
                if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
                        unode->up = cpu_to_le32(ra);
-                       unode->btree.fnode_parent = 0;
+                       unode->btree.flags &= ~BP_fnode_parent;
                        mark_buffer_dirty(bh1);
                        brelse(bh1);
                }
        }
-       btree->internal = 1;
+       btree->flags |= BP_internal;
        btree->n_free_nodes = fnod ? 10 : 58;
        btree->n_used_nodes = 2;
        btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
@@ -278,7 +283,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
        int d1, d2;
        go_down:
        d2 = 0;
-       while (btree1->internal) {
+       while (bp_internal(btree1)) {
                ano = le32_to_cpu(btree1->u.internal[pos].down);
                if (level) brelse(bh);
                if (hpfs_sb(s)->sb_chk)
@@ -412,13 +417,13 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
                        btree->n_free_nodes = 8;
                        btree->n_used_nodes = 0;
                        btree->first_free = cpu_to_le16(8);
-                       btree->internal = 0;
+                       btree->flags &= ~BP_internal;
                        mark_buffer_dirty(bh);
                } else hpfs_free_sectors(s, f, 1);
                brelse(bh);
                return;
        }
-       while (btree->internal) {
+       while (bp_internal(btree)) {
                nodes = btree->n_used_nodes + btree->n_free_nodes;
                for (i = 0; i < btree->n_used_nodes; i++)
                        if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
@@ -479,13 +484,13 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
        struct extended_attribute *ea;
        struct extended_attribute *ea_end;
        if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
-       if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
+       if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
        else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
        ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
-               if (ea->indirect)
-                       hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
-       hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
+               if (ea_indirect(ea))
+                       hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
+       hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
        brelse(bh);
        hpfs_free_sectors(s, fno, 1);
 }
index 9ecde27d1e297ed3d42a742352f469f1bf2dfb9a..f49d1498aa2e98d3a1bc46fff59d67ca45f04a4e 100644 (file)
@@ -156,7 +156,6 @@ void hpfs_brelse4(struct quad_buffer_head *qbh)
 
 void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
 {
-       PRINTK(("hpfs_mark_4buffers_dirty\n"));
        memcpy(qbh->bh[0]->b_data, qbh->data, 512);
        memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
        memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
index 2fa0089a02a8ec2934cda55cbbae18e50c34a4ea..b8472f803f4e54ea5039b85ac36cfdf33a48925b 100644 (file)
@@ -87,7 +87,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        ret = -EIOERROR;
                        goto out;
                }
-               if (!fno->dirflag) {
+               if (!fnode_is_dir(fno)) {
                        e = 1;
                        hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
                                        (unsigned long)inode->i_ino);
index 1e0e2ac30fd3be93f8e5b7a97618f19a52220ec4..3228c524ebe56f948d8896cec23ca6b1284f6303 100644 (file)
@@ -153,7 +153,7 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
                }
                de->length = cpu_to_le16(36);
                de->down = 1;
-               *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr);
+               *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr);
        }
 }
 
@@ -177,7 +177,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
        memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
        memset(de, 0, d_size);
        if (down_ptr) {
-               *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
+               *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
                de->down = 1;
        }
        de->length = cpu_to_le16(d_size);
@@ -656,7 +656,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                                del->down = 0;
                                d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
                        } else if (down)
-                               *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
+                               *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
                } else goto endm;
                if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
                        printk("HPFS: out of memory for dtree balancing\n");
@@ -672,7 +672,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                        de_prev->down = 1;
                        dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
                }
-               *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
+               *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
@@ -1015,7 +1015,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                kfree(name2);
                return NULL;
        }       
-       if (!upf->dirflag) {
+       if (!fnode_is_dir(upf)) {
                brelse(bh);
                hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
                kfree(name2);
index d8b84d113c891bbcfd8416d3f35153983b0549a7..bcaafcd2666ac275d02c2f054023cc537ebd7644 100644 (file)
@@ -23,15 +23,15 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
                        return;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-               if (ea->indirect) {
+               if (ea_indirect(ea)) {
                        if (ea_valuelen(ea) != 8) {
-                               hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
+                               hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x",
                                        ano ? "anode" : "sectors", a, pos);
                                return;
                        }
                        if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
                                return;
-                       hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
+                       hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
                }
                pos += ea->namelen + ea_valuelen(ea) + 5;
        }
@@ -81,7 +81,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
+                       if (ea_indirect(ea))
                                goto indirect;
                        if (ea_valuelen(ea) >= size)
                                return -EINVAL;
@@ -91,7 +91,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                ea = (struct extended_attribute *)ex;
@@ -101,10 +101,10 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
                        return -EIO;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return -EIO;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
+                       if (ea_indirect(ea))
                                goto indirect;
                        if (ea_valuelen(ea) >= size)
                                return -EINVAL;
@@ -119,7 +119,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
 indirect:
        if (ea_len(ea) >= size)
                return -EINVAL;
-       if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf))
+       if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf))
                return -EIO;
        buf[ea_len(ea)] = 0;
        return 0;
@@ -136,8 +136,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
-                               return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+                       if (ea_indirect(ea))
+                               return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
                        if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
@@ -148,7 +148,7 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                char ex[4 + 255 + 1 + 8];
@@ -159,11 +159,11 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                        return NULL;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return NULL;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
-                               return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+                       if (ea_indirect(ea))
+                               return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
                        if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
@@ -199,9 +199,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect) {
+                       if (ea_indirect(ea)) {
                                if (ea_len(ea) == size)
-                                       set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+                                       set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
                        } else if (ea_valuelen(ea) == size) {
                                memcpy(ea_data(ea), data, size);
                        }
@@ -209,7 +209,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                char ex[4 + 255 + 1 + 8];
@@ -220,12 +220,12 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                        return;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect) {
+                       if (ea_indirect(ea)) {
                                if (ea_len(ea) == size)
-                                       set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+                                       set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
                        }
                        else {
                                if (ea_valuelen(ea) == size)
@@ -246,7 +246,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
                hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
                        (unsigned long)inode->i_ino,
-                       le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
+                       le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
                return;
        }
        if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
@@ -276,7 +276,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
                fnode->ea_size_s = cpu_to_le16(0);
                fnode->ea_secno = cpu_to_le32(n);
-               fnode->ea_anode = cpu_to_le32(0);
+               fnode->flags &= ~FNODE_anode;
                mark_buffer_dirty(bh);
                brelse(bh);
        }
@@ -288,9 +288,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                        secno q = hpfs_alloc_sector(s, fno, 1, 0);
                        if (!q) goto bail;
                        fnode->ea_secno = cpu_to_le32(q);
-                       fnode->ea_anode = 0;
+                       fnode->flags &= ~FNODE_anode;
                        len++;
-               } else if (!fnode->ea_anode) {
+               } else if (!fnode_in_anode(fnode)) {
                        if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
                                len++;
                        } else {
@@ -310,7 +310,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                anode->u.external[0].length = cpu_to_le32(len);
                                mark_buffer_dirty(bh);
                                brelse(bh);
-                               fnode->ea_anode = 1;
+                               fnode->flags |= FNODE_anode;
                                fnode->ea_secno = cpu_to_le32(a_s);*/
                                secno new_sec;
                                int i;
@@ -338,7 +338,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                len = (pos + 511) >> 9;
                        }
                }
-               if (fnode->ea_anode) {
+               if (fnode_in_anode(fnode)) {
                        if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
                                                     0, len) != -1) {
                                len++;
@@ -351,16 +351,16 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        h[1] = strlen(key);
        h[2] = size & 0xff;
        h[3] = size >> 8;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
        fnode->ea_size_l = cpu_to_le32(pos);
        ret:
        hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
        return;
        bail:
        if (le32_to_cpu(fnode->ea_secno))
-               if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
+               if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
                else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
        else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
 }
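
The hunks above replace direct bitfield access (ea->indirect, ea->anode, fnode->ea_anode) with small predicates (ea_indirect(), ea_in_anode(), fnode_in_anode()) that test explicit flag bits, so the on-disk interpretation no longer depends on compiler bitfield ordering. Below is a minimal standalone sketch of that accessor pattern; the struct and main() are illustrative stand-ins, not the real HPFS layouts.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the on-disk EA header: one flags byte
 * instead of compiler-ordered bitfields. */
enum { EA_indirect = 0x01, EA_anode = 0x02, EA_needea = 0x80 };

struct ea_header {
        uint8_t flags;          /* EA_* bits, same byte on any host */
        uint8_t namelen;
};

static inline bool ea_indirect(const struct ea_header *ea)
{
        return ea->flags & EA_indirect;
}

static inline bool ea_in_anode(const struct ea_header *ea)
{
        return ea->flags & EA_anode;
}

int main(void)
{
        struct ea_header ea = { .flags = EA_indirect | EA_needea, .namelen = 4 };

        /* Callers switch on these predicates instead of reading
         * ea->indirect / ea->anode bitfields directly. */
        printf("indirect=%d in_anode=%d\n", ea_indirect(&ea), ea_in_anode(&ea));
        return 0;
}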
index 8b0650aae32812bac9abbb581439b592d64949d2..cce025aff1b19b86f824cd47bfb6c9457d583068 100644 (file)
@@ -51,11 +51,11 @@ struct hpfs_boot_block
   u8 n_rootdir_entries[2];
   u8 n_sectors_s[2];
   u8 media_byte;
-  u16 sectors_per_fat;
-  u16 sectors_per_track;
-  u16 heads_per_cyl;
-  u32 n_hidden_sectors;
-  u32 n_sectors_l;             /* size of partition */
+  __le16 sectors_per_fat;
+  __le16 sectors_per_track;
+  __le16 heads_per_cyl;
+  __le32 n_hidden_sectors;
+  __le32 n_sectors_l;          /* size of partition */
   u8 drive_number;
   u8 mbz;
   u8 sig_28h;                  /* 28h */
@@ -63,7 +63,7 @@ struct hpfs_boot_block
   u8 vol_label[11];
   u8 sig_hpfs[8];              /* "HPFS    " */
   u8 pad[448];
-  u16 magic;                   /* aa55 */
+  __le16 magic;                        /* aa55 */
 };
 
 
@@ -75,28 +75,28 @@ struct hpfs_boot_block
 
 struct hpfs_super_block
 {
-  u32 magic;                           /* f995 e849 */
-  u32 magic1;                          /* fa53 e9c5, more magic? */
+  __le32 magic;                                /* f995 e849 */
+  __le32 magic1;                       /* fa53 e9c5, more magic? */
   u8 version;                          /* version of a filesystem  usually 2 */
   u8 funcversion;                      /* functional version - oldest version
                                           of filesystem that can understand
                                           this disk */
-  u16 zero;                            /* 0 */
-  fnode_secno root;                    /* fnode of root directory */
-  secno n_sectors;                     /* size of filesystem */
-  u32 n_badblocks;                     /* number of bad blocks */
-  secno bitmaps;                       /* pointers to free space bit maps */
-  u32 zero1;                           /* 0 */
-  secno badblocks;                     /* bad block list */
-  u32 zero3;                           /* 0 */
-  time32_t last_chkdsk;                        /* date last checked, 0 if never */
-  time32_t last_optimize;              /* date last optimized, 0 if never */
-  secno n_dir_band;                    /* number of sectors in dir band */
-  secno dir_band_start;                        /* first sector in dir band */
-  secno dir_band_end;                  /* last sector in dir band */
-  secno dir_band_bitmap;               /* free space map, 1 dnode per bit */
+  __le16 zero;                         /* 0 */
+  __le32 root;                         /* fnode of root directory */
+  __le32 n_sectors;                    /* size of filesystem */
+  __le32 n_badblocks;                  /* number of bad blocks */
+  __le32 bitmaps;                      /* pointers to free space bit maps */
+  __le32 zero1;                                /* 0 */
+  __le32 badblocks;                    /* bad block list */
+  __le32 zero3;                                /* 0 */
+  __le32 last_chkdsk;                  /* date last checked, 0 if never */
+  __le32 last_optimize;                        /* date last optimized, 0 if never */
+  __le32 n_dir_band;                   /* number of sectors in dir band */
+  __le32 dir_band_start;                       /* first sector in dir band */
+  __le32 dir_band_end;                 /* last sector in dir band */
+  __le32 dir_band_bitmap;              /* free space map, 1 dnode per bit */
   u8 volume_name[32];                  /* not used */
-  secno user_id_table;                 /* 8 preallocated sectors - user id */
+  __le32 user_id_table;                        /* 8 preallocated sectors - user id */
   u32 zero6[103];                      /* 0 */
 };
 
@@ -109,8 +109,8 @@ struct hpfs_super_block
 
 struct hpfs_spare_block
 {
-  u32 magic;                           /* f991 1849 */
-  u32 magic1;                          /* fa52 29c5, more magic? */
+  __le32 magic;                                /* f991 1849 */
+  __le32 magic1;                               /* fa52 29c5, more magic? */
 
 #ifdef __LITTLE_ENDIAN
   u8 dirty: 1;                         /* 0 clean, 1 "improperly stopped" */
@@ -153,21 +153,21 @@ struct hpfs_spare_block
   u8 mm_contlgulty;
   u8 unused;
 
-  secno hotfix_map;                    /* info about remapped bad sectors */
-  u32 n_spares_used;                   /* number of hotfixes */
-  u32 n_spares;                                /* number of spares in hotfix map */
-  u32 n_dnode_spares_free;             /* spare dnodes unused */
-  u32 n_dnode_spares;                  /* length of spare_dnodes[] list,
+  __le32 hotfix_map;                   /* info about remapped bad sectors */
+  __le32 n_spares_used;                        /* number of hotfixes */
+  __le32 n_spares;                     /* number of spares in hotfix map */
+  __le32 n_dnode_spares_free;          /* spare dnodes unused */
+  __le32 n_dnode_spares;               /* length of spare_dnodes[] list,
                                           follows in this block*/
-  secno code_page_dir;                 /* code page directory block */
-  u32 n_code_pages;                    /* number of code pages */
-  u32 super_crc;                       /* on HPFS386 and LAN Server this is
+  __le32 code_page_dir;                        /* code page directory block */
+  __le32 n_code_pages;                 /* number of code pages */
+  __le32 super_crc;                    /* on HPFS386 and LAN Server this is
                                           checksum of superblock, on normal
                                           OS/2 unused */
-  u32 spare_crc;                       /* on HPFS386 checksum of spareblock */
-  u32 zero1[15];                       /* unused */
-  dnode_secno spare_dnodes[100];       /* emergency free dnode list */
-  u32 zero2[1];                                /* room for more? */
+  __le32 spare_crc;                    /* on HPFS386 checksum of spareblock */
+  __le32 zero1[15];                    /* unused */
+  __le32 spare_dnodes[100];            /* emergency free dnode list */
+  __le32 zero2[1];                     /* room for more? */
 };
 
 /* The bad block list is 4 sectors long.  The first word must be zero,
@@ -202,18 +202,18 @@ struct hpfs_spare_block
 
 struct code_page_directory
 {
-  u32 magic;                           /* 4945 21f7 */
-  u32 n_code_pages;                    /* number of pointers following */
-  u32 zero1[2];
+  __le32 magic;                                /* 4945 21f7 */
+  __le32 n_code_pages;                 /* number of pointers following */
+  __le32 zero1[2];
   struct {
-    u16 ix;                            /* index */
-    u16 code_page_number;              /* code page number */
-    u32 bounds;                                /* matches corresponding word
+    __le16 ix;                         /* index */
+    __le16 code_page_number;           /* code page number */
+    __le32 bounds;                     /* matches corresponding word
                                           in data block */
-    secno code_page_data;              /* sector number of a code_page_data
+    __le32 code_page_data;             /* sector number of a code_page_data
                                           containing c.p. array */
-    u16 index;                         /* index in c.p. array in that sector*/
-    u16 unknown;                       /* some unknown value; usually 0;
+    __le16 index;                      /* index in c.p. array in that sector*/
+    __le16 unknown;                    /* some unknown value; usually 0;
                                           2 in Japanese version */
   } array[31];                         /* unknown length */
 };
@@ -224,19 +224,19 @@ struct code_page_directory
 
 struct code_page_data
 {
-  u32 magic;                           /* 8945 21f7 */
-  u32 n_used;                          /* # elements used in c_p_data[] */
-  u32 bounds[3];                       /* looks a bit like
+  __le32 magic;                                /* 8945 21f7 */
+  __le32 n_used;                       /* # elements used in c_p_data[] */
+  __le32 bounds[3];                    /* looks a bit like
                                             (beg1,end1), (beg2,end2)
                                           one byte each */
-  u16 offs[3];                         /* offsets from start of sector
+  __le16 offs[3];                      /* offsets from start of sector
                                           to start of c_p_data[ix] */
   struct {
-    u16 ix;                            /* index */
-    u16 code_page_number;              /* code page number */
-    u16 unknown;                       /* the same as in cp directory */
+    __le16 ix;                         /* index */
+    __le16 code_page_number;           /* code page number */
+    __le16 unknown;                    /* the same as in cp directory */
     u8 map[128];                       /* upcase table for chars 80..ff */
-    u16 zero2;
+    __le16 zero2;
   } code_page[3];
   u8 incognita[78];
 };
@@ -278,8 +278,8 @@ struct code_page_data
 #define DNODE_MAGIC   0x77e40aae
 
 struct dnode {
-  u32 magic;                           /* 77e4 0aae */
-  u32 first_free;                      /* offset from start of dnode to
+  __le32 magic;                                /* 77e4 0aae */
+  __le32 first_free;                   /* offset from start of dnode to
                                           first free dir entry */
 #ifdef __LITTLE_ENDIAN
   u8 root_dnode: 1;                    /* Is it root dnode? */
@@ -293,14 +293,14 @@ struct dnode {
   u8 root_dnode: 1;                    /* Is it root dnode? */
 #endif
   u8 increment_me2[3];
-  secno up;                            /* (root dnode) directory's fnode
+  __le32 up;                           /* (root dnode) directory's fnode
                                           (nonroot) parent dnode */
-  dnode_secno self;                    /* pointer to this dnode */
+  __le32 self;                 /* pointer to this dnode */
   u8 dirent[2028];                     /* one or more dirents */
 };
 
 struct hpfs_dirent {
-  u16 length;                          /* offset to next dirent */
+  __le16 length;                       /* offset to next dirent */
 
 #ifdef __LITTLE_ENDIAN
   u8 first: 1;                         /* set on phony ^A^A (".") entry */
@@ -346,12 +346,12 @@ struct hpfs_dirent {
   u8 read_only: 1;                     /* dos attrib */
 #endif
 
-  fnode_secno fnode;                   /* fnode giving allocation info */
-  time32_t write_date;                 /* mtime */
-  u32 file_size;                       /* file length, bytes */
-  time32_t read_date;                  /* atime */
-  time32_t creation_date;                      /* ctime */
-  u32 ea_size;                         /* total EA length, bytes */
+  __le32 fnode;                                /* fnode giving allocation info */
+  __le32 write_date;                   /* mtime */
+  __le32 file_size;                    /* file length, bytes */
+  __le32 read_date;                    /* atime */
+  __le32 creation_date;                        /* ctime */
+  __le32 ea_size;                      /* total EA length, bytes */
   u8 no_of_acls;                       /* number of ACL's (low 3 bits) */
   u8 ix;                               /* code page index (of filename), see
                                           struct code_page_data */
@@ -375,50 +375,36 @@ struct hpfs_dirent {
 
 struct bplus_leaf_node
 {
-  u32 file_secno;                      /* first file sector in extent */
-  u32 length;                          /* length, sectors */
-  secno disk_secno;                    /* first corresponding disk sector */
+  __le32 file_secno;                   /* first file sector in extent */
+  __le32 length;                       /* length, sectors */
+  __le32 disk_secno;                   /* first corresponding disk sector */
 };
 
 struct bplus_internal_node
 {
-  u32 file_secno;                      /* subtree maps sectors < this  */
-  anode_secno down;                    /* pointer to subtree */
+  __le32 file_secno;                   /* subtree maps sectors < this  */
+  __le32 down;                         /* pointer to subtree */
 };
 
+enum {
+       BP_hbff = 1,
+       BP_fnode_parent = 0x20,
+       BP_binary_search = 0x40,
+       BP_internal = 0x80
+};
 struct bplus_header
 {
-#ifdef __LITTLE_ENDIAN
-  u8 hbff: 1;                  /* high bit of first free entry offset */
-  u8 flag1234: 4;
-  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
-                                          the data btree or some ea or the
-                                          main ea bootage pointer ea_secno */
-                                       /* also can get set in fnodes, which
-                                          may be a chkdsk glitch or may mean
-                                          this bit is irrelevant in fnodes,
-                                          or this interpretation is all wet */
-  u8 binary_search: 1;                 /* suggest binary search (unused) */
-  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
-                                          0 -> (leaf) list of extents */
-#else
-  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
-                                          0 -> (leaf) list of extents */
-  u8 binary_search: 1;                 /* suggest binary search (unused) */
-  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
+  u8 flags;                            /* bit 0 - high bit of first free entry offset
+                                          bit 5 - we're pointed to by an fnode,
                                           the data btree or some ea or the
-                                          main ea bootage pointer ea_secno */
-                                       /* also can get set in fnodes, which
-                                          may be a chkdsk glitch or may mean
-                                          this bit is irrelevant in fnodes,
-                                          or this interpretation is all wet */
-  u8 flag1234: 4;
-  u8 hbff: 1;                  /* high bit of first free entry offset */
-#endif
+                                          main ea bootage pointer ea_secno
+                                          bit 6 - suggest binary search (unused)
+                                          bit 7 - 1 -> (internal) tree of anodes
+                                                  0 -> (leaf) list of extents */
   u8 fill[3];
   u8 n_free_nodes;                     /* free nodes in following array */
   u8 n_used_nodes;                     /* used nodes in following array */
-  u16 first_free;                      /* offset from start of header to
+  __le16 first_free;                   /* offset from start of header to
                                           first free node in array */
   union {
     struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -428,6 +414,16 @@ struct bplus_header
   } u;
 };
 
+static inline bool bp_internal(struct bplus_header *bp)
+{
+       return bp->flags & BP_internal;
+}
+
+static inline bool bp_fnode_parent(struct bplus_header *bp)
+{
+       return bp->flags & BP_fnode_parent;
+}
+
 /* fnode: root of allocation b+ tree, and EA's */
 
 /* Every file and every directory has one fnode, pointed to by the directory
@@ -436,62 +432,56 @@ struct bplus_header
 
 #define FNODE_MAGIC 0xf7e40aae
 
+enum {FNODE_anode = cpu_to_le16(2), FNODE_dir = cpu_to_le16(256)};
 struct fnode
 {
-  u32 magic;                           /* f7e4 0aae */
-  u32 zero1[2];                                /* read history */
+  __le32 magic;                                /* f7e4 0aae */
+  __le32 zero1[2];                     /* read history */
   u8 len, name[15];                    /* true length, truncated name */
-  fnode_secno up;                      /* pointer to file's directory fnode */
-  secno acl_size_l;
-  secno acl_secno;
-  u16 acl_size_s;
+  __le32 up;                           /* pointer to file's directory fnode */
+  __le32 acl_size_l;
+  __le32 acl_secno;
+  __le16 acl_size_s;
   u8 acl_anode;
   u8 zero2;                            /* history bit count */
-  u32 ea_size_l;                       /* length of disk-resident ea's */
-  secno ea_secno;                      /* first sector of disk-resident ea's*/
-  u16 ea_size_s;                       /* length of fnode-resident ea's */
-
-#ifdef __LITTLE_ENDIAN
-  u8 flag0: 1;
-  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
-  u8 flag234567: 6;
-#else
-  u8 flag234567: 6;
-  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
-  u8 flag0: 1;
-#endif
+  __le32 ea_size_l;                    /* length of disk-resident ea's */
+  __le32 ea_secno;                     /* first sector of disk-resident ea's*/
+  __le16 ea_size_s;                    /* length of fnode-resident ea's */
 
-#ifdef __LITTLE_ENDIAN
-  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
-                                          points to dnode. */
-  u8 flag9012345: 7;
-#else
-  u8 flag9012345: 7;
-  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
+  __le16 flags;                                /* bit 1 set -> ea_secno is an anode */
+                                       /* bit 8 set -> directory.  first & only extent
                                           points to dnode. */
-#endif
-
   struct bplus_header btree;           /* b+ tree, 8 extents or 12 subtrees */
   union {
     struct bplus_leaf_node external[8];
     struct bplus_internal_node internal[12];
   } u;
 
-  u32 file_size;                       /* file length, bytes */
-  u32 n_needea;                                /* number of EA's with NEEDEA set */
+  __le32 file_size;                    /* file length, bytes */
+  __le32 n_needea;                     /* number of EA's with NEEDEA set */
   u8 user_id[16];                      /* unused */
-  u16 ea_offs;                         /* offset from start of fnode
+  __le16 ea_offs;                      /* offset from start of fnode
                                           to first fnode-resident ea */
   u8 dasd_limit_treshhold;
   u8 dasd_limit_delta;
-  u32 dasd_limit;
-  u32 dasd_usage;
+  __le32 dasd_limit;
+  __le32 dasd_usage;
   u8 ea[316];                          /* zero or more EA's, packed together
                                           with no alignment padding.
                                           (Do not use this name, get here
                                           via fnode + ea_offs. I think.) */
 };
 
+static inline bool fnode_in_anode(struct fnode *p)
+{
+       return (p->flags & FNODE_anode) != 0;
+}
+
+static inline bool fnode_is_dir(struct fnode *p)
+{
+       return (p->flags & FNODE_dir) != 0;
+}
+
 
 /* anode: 99.44% pure allocation tree */
 
@@ -499,9 +489,9 @@ struct fnode
 
 struct anode
 {
-  u32 magic;                           /* 37e4 0aae */
-  anode_secno self;                    /* pointer to this anode */
-  secno up;                            /* parent anode or fnode */
+  __le32 magic;                                /* 37e4 0aae */
+  __le32 self;                         /* pointer to this anode */
+  __le32 up;                           /* parent anode or fnode */
 
   struct bplus_header btree;           /* b+tree, 40 extents or 60 subtrees */
   union {
@@ -509,7 +499,7 @@ struct anode
     struct bplus_internal_node internal[60];
   } u;
 
-  u32 fill[3];                         /* unused */
+  __le32 fill[3];                      /* unused */
 };
 
 
@@ -528,32 +518,23 @@ struct anode
    run, or in multiple runs.  Flags in the fnode tell whether the EA list
    is immediate, in a single run, or in multiple runs. */
 
+enum {EA_indirect = 1, EA_anode = 2, EA_needea = 128 };
 struct extended_attribute
 {
-#ifdef __LITTLE_ENDIAN
-  u8 indirect: 1;                      /* 1 -> value gives sector number
+  u8 flags;                            /* bit 0 set -> value gives sector number
                                           where real value starts */
-  u8 anode: 1;                         /* 1 -> sector is an anode
+                                       /* bit 1 set -> sector is an anode
                                           that points to fragmented value */
-  u8 flag23456: 5;
-  u8 needea: 1;                                /* required ea */
-#else
-  u8 needea: 1;                                /* required ea */
-  u8 flag23456: 5;
-  u8 anode: 1;                         /* 1 -> sector is an anode
-                                          that points to fragmented value */
-  u8 indirect: 1;                      /* 1 -> value gives sector number
-                                          where real value starts */
-#endif
+                                       /* bit 7 set -> required ea */
   u8 namelen;                          /* length of name, bytes */
   u8 valuelen_lo;                      /* length of value, bytes */
   u8 valuelen_hi;                      /* length of value, bytes */
-  u8 name[0];
+  u8 name[];
   /*
     u8 name[namelen];                  ascii attrib name
     u8 nul;                            terminating '\0', not counted
     u8 value[valuelen];                        value, arbitrary
-      if this.indirect, valuelen is 8 and the value is
+      if this.flags & 1, valuelen is 8 and the value is
         u32 length;                    real length of value, bytes
         secno secno;                   sector address where it starts
       if this.anode, the above sector number is the root of an anode tree
@@ -561,6 +542,16 @@ struct extended_attribute
   */
 };
 
+static inline bool ea_indirect(struct extended_attribute *ea)
+{
+       return ea->flags & EA_indirect;
+}
+
+static inline bool ea_in_anode(struct extended_attribute *ea)
+{
+       return ea->flags & EA_anode;
+}
+
 /*
    Local Variables:
    comment-column: 40
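
The hpfs.h hunk above re-types every multi-byte on-disk field as __le16/__le32 so that tools such as sparse can flag accesses that skip the byte-order conversion; callers then always go through le16_to_cpu()/le32_to_cpu(). The sketch below shows the same idea in a freestanding form, with hand-rolled conversion helpers standing in for the kernel macros and a made-up record layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for __le16/__le32 plus le*_to_cpu: the value is assembled
 * byte by byte, so the result is identical on LE and BE hosts. */
typedef struct { uint8_t b[2]; } le16;
typedef struct { uint8_t b[4]; } le32;

static uint16_t le16_to_cpu(le16 v) { return (uint16_t)(v.b[0] | v.b[1] << 8); }
static uint32_t le32_to_cpu(le32 v)
{
        return (uint32_t)v.b[0] | (uint32_t)v.b[1] << 8 |
               (uint32_t)v.b[2] << 16 | (uint32_t)v.b[3] << 24;
}

/* Illustrative on-disk record: every multi-byte field is explicitly
 * little-endian, mirroring the __le32 annotations above. */
struct disk_super {
        le32 magic;
        le32 n_sectors;
        le16 version;
};

int main(void)
{
        /* Bytes as they would sit on disk (little-endian). */
        const uint8_t raw[10] = { 0x49, 0xe8, 0x95, 0xf9,   /* magic f995e849 */
                                  0x00, 0x10, 0x00, 0x00,   /* 0x1000 sectors */
                                  0x02, 0x00 };             /* version 2 */
        struct disk_super sb;
        memcpy(&sb, raw, sizeof(raw));

        printf("magic=%08x sectors=%u version=%u\n",
               (unsigned)le32_to_cpu(sb.magic),
               (unsigned)le32_to_cpu(sb.n_sectors),
               (unsigned)le16_to_cpu(sb.version));
        return 0;
}

Because each field is decoded byte by byte, the program prints the same values on any host, which is exactly what the annotations are meant to guarantee.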
index de946170ebb1092937a1efd5f8dbd104f064d170..c07ef1f1ced60a0cf295772a218575d9c78e58d1 100644 (file)
 
 #define CHKCOND(x,y) if (!(x)) printk y
 
-#ifdef DBG
-#define PRINTK(x) printk x
-#else
-#undef PRINTK
-#define PRINTK(x)
-#endif
-
 struct hpfs_inode_info {
        loff_t mmu_private;
        ino_t i_parent_dir;     /* (directories) gives fnode of parent dir */
@@ -82,7 +75,7 @@ struct hpfs_sb_info {
        unsigned char *sb_cp_table;     /* code page tables: */
                                        /*      128 bytes uppercasing table & */
                                        /*      128 bytes lowercasing table */
-       unsigned *sb_bmp_dir;           /* main bitmap directory */
+       __le32 *sb_bmp_dir;             /* main bitmap directory */
        unsigned sb_c_bitmap;           /* current bitmap */
        unsigned sb_max_fwd_alloc;      /* max forwad allocation */
        int sb_timeshift;
@@ -100,7 +93,7 @@ struct quad_buffer_head {
 static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
 {
   CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
-  return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4));
+  return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4));
 }
 
 /* The first dir entry in a dnode */
@@ -148,12 +141,12 @@ static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
 
 static inline secno ea_sec(struct extended_attribute *ea)
 {
-       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen)));
+       return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen)));
 }
 
 static inline secno ea_len(struct extended_attribute *ea)
 {
-       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen)));
+       return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen)));
 }
 
 static inline char *ea_data(struct extended_attribute *ea)
@@ -178,7 +171,7 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
        dst->not_8x3 = n;
 }
 
-static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n)
+static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n)
 {
        int i;
        if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
@@ -275,10 +268,10 @@ void hpfs_evict_inode(struct inode *);
 
 /* map.c */
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
-unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
+__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
+__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
-secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
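
ea_sec() and ea_len() above read a __le32 that sits at a byte offset inside a packed EA record, so they wrap the load in get_unaligned() before converting the byte order. A standalone sketch of the same unaligned-read pattern follows; memcpy() is used as one common alignment-safe way to do the load, and the buffer layout is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit little-endian value at an arbitrary (possibly
 * unaligned) byte offset without tripping alignment faults. */
static uint32_t get_unaligned_le32(const void *p)
{
        uint8_t b[4];
        memcpy(b, p, 4);                        /* safe for any alignment */
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        /* Fake packed EA record: flags byte, namelen byte, name, NUL,
         * then a little-endian sector number at an odd offset. */
        uint8_t ea[16] = { 0x01, 0x03, 'f', 'o', 'o', 0,
                           0x39, 0x05, 0x00, 0x00 };        /* secno 0x539 */

        unsigned namelen = ea[1];
        uint32_t secno = get_unaligned_le32(ea + 2 + namelen + 1);

        printf("namelen=%u secno=%u\n", namelen, (unsigned)secno);  /* 3, 1337 */
        return 0;
}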
index b43066cbdc6a7cd201538cdc86e28fd9d22deac8..ed671e0ea78443b35bb6d1dd3eabc64bd7559c1f 100644 (file)
@@ -110,7 +110,7 @@ void hpfs_read_inode(struct inode *i)
                        }
                }
        }
-       if (fnode->dirflag) {
+       if (fnode_is_dir(fnode)) {
                int n_dnodes, n_subdirs;
                i->i_mode |= S_IFDIR;
                i->i_op = &hpfs_dir_iops;
index a790821366a7f045d068fe47df517dc479b0ecce..4acb19d78359d4bec83f90b854680dc3962905cf 100644 (file)
@@ -8,12 +8,12 @@
 
 #include "hpfs_fn.h"
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
+__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
 {
        return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
 }
 
-unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
                         struct quad_buffer_head *qbh, char *id)
 {
        secno sec;
@@ -89,18 +89,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
        return cp_table;
 }
 
-secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
+__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
 {
        struct buffer_head *bh;
        int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
        int i;
-       secno *b;
+       __le32 *b;
        if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
                printk("HPFS: can't allocate memory for bitmap directory\n");
                return NULL;
        }       
        for (i=0;i<n;i++) {
-               secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
+               __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
                if (!d) {
                        kfree(b);
                        return NULL;
@@ -130,16 +130,16 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                                        (unsigned long)ino);
                                goto bail;
                        }
-                       if (!fnode->dirflag) {
+                       if (!fnode_is_dir(fnode)) {
                                if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
-                                   (fnode->btree.internal ? 12 : 8)) {
+                                   (bp_internal(&fnode->btree) ? 12 : 8)) {
                                        hpfs_error(s,
                                           "bad number of nodes in fnode %08lx",
                                            (unsigned long)ino);
                                        goto bail;
                                }
                                if (le16_to_cpu(fnode->btree.first_free) !=
-                                   8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
+                                   8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
                                        hpfs_error(s,
                                            "bad first_free pointer in fnode %08lx",
                                            (unsigned long)ino);
@@ -187,12 +187,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
                                goto bail;
                        }
                        if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
-                           (anode->btree.internal ? 60 : 40)) {
+                           (bp_internal(&anode->btree) ? 60 : 40)) {
                                hpfs_error(s, "bad number of nodes in anode %08x", ano);
                                goto bail;
                        }
                        if (le16_to_cpu(anode->btree.first_free) !=
-                           8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
+                           8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
                                hpfs_error(s, "bad first_free pointer in anode %08x", ano);
                                goto bail;
                        }
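
hpfs_map_fnode() and hpfs_map_anode() above sanity-check the b+tree header: used plus free slots must equal the node's capacity, and first_free must equal the 8-byte header plus one entry per used slot (8-byte internal entries, 12-byte leaf extents). The sketch below redoes that arithmetic on a cut-down header struct with made-up values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down b+tree header, just enough for the consistency checks. */
struct btree_hdr {
        bool     internal;      /* tree of subtrees vs. list of extents */
        uint8_t  n_free_nodes;
        uint8_t  n_used_nodes;
        uint16_t first_free;    /* offset from start of header */
};

/* Same arithmetic as the fnode checks above: a leaf fnode tree holds
 * 8 x 12-byte extents, an internal one 12 x 8-byte subtree pointers,
 * and the header itself is 8 bytes. */
static bool fnode_btree_ok(const struct btree_hdr *bt)
{
        unsigned capacity = bt->internal ? 12 : 8;
        unsigned entry_sz = bt->internal ? 8 : 12;

        if ((unsigned)bt->n_used_nodes + bt->n_free_nodes != capacity)
                return false;
        return bt->first_free == 8 + bt->n_used_nodes * entry_sz;
}

int main(void)
{
        /* Fresh leaf fnode: 1 used + 7 free slots,
         * first_free = 8 + 1*12 = 0x14. */
        struct btree_hdr ok  = { false, 7, 1, 0x14 };
        struct btree_hdr bad = { false, 7, 2, 0x14 };   /* counts don't add up */

        printf("ok=%d bad=%d\n", fnode_btree_ok(&ok), fnode_btree_ok(&bad));
        return 0;
}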
index 30dd7b10b507a077877d58a2bb4d5ada18ee3101..9083ef8af58c162f7fd207f7ef37263b1f35de4f 100644 (file)
@@ -70,7 +70,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
        fnode->up = cpu_to_le32(dir->i_ino);
-       fnode->dirflag = 1;
+       fnode->flags |= FNODE_dir;
        fnode->btree.n_free_nodes = 7;
        fnode->btree.n_used_nodes = 1;
        fnode->btree.first_free = cpu_to_le16(0x14);
index 54f6eccb79d9ed8c67f7ada5a96867ad4c61b37c..706a12c083ea726a7a268d647ae266b02a3a2ca7 100644 (file)
@@ -572,7 +572,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                mark_buffer_dirty(bh2);
        }
 
-       if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) {
+       if (spareblock->hotfixes_used || spareblock->n_spares_used) {
                if (errs >= 2) {
                        printk("HPFS: Hotfixes not supported here, try chkdsk\n");
                        mark_dirty(s, 0);
@@ -645,7 +645,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                root->i_mtime.tv_nsec = 0;
                root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
                root->i_ctime.tv_nsec = 0;
-               hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size);
+               hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
                hpfs_i(root)->i_parent_dir = root->i_ino;
                if (root->i_size == -1)
                        root->i_size = 2048;
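
The super.c hunk above fixes hpfs_fill_super() to read the root dirent's ea_size with le32_to_cpu() instead of le16_to_cpu(), since ea_size is a 32-bit on-disk field. A quick userspace illustration of why the width matters follows; the rd_le16()/rd_le32() helpers and the value are made up for the example.

#include <stdint.h>
#include <stdio.h>

static uint16_t rd_le16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t rd_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        /* 32-bit little-endian ea_size of 0x00012345 as stored on disk. */
        const uint8_t ea_size[4] = { 0x45, 0x23, 0x01, 0x00 };

        /* Reading a 32-bit field with the 16-bit helper silently drops
         * the upper half: 0x2345 instead of 0x12345. */
        printf("le16 read: 0x%x (wrong)\n", (unsigned)rd_le16(ea_size));
        printf("le32 read: 0x%x (right)\n", (unsigned)rd_le32(ea_size));
        return 0;
}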
index 6bc8761cc3333524bf6c0408a4227f14a325bf7e..c99163b1b31036ef68974c0c5dbc192f8f73f4da 100644 (file)
@@ -1487,10 +1487,30 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
        return 0;
 }
 
+/*
+ * This does the actual work of updating an inodes time or version.  Must have
+ * had called mnt_want_write() before calling this.
+ */
+static int update_time(struct inode *inode, struct timespec *time, int flags)
+{
+       if (inode->i_op->update_time)
+               return inode->i_op->update_time(inode, time, flags);
+
+       if (flags & S_ATIME)
+               inode->i_atime = *time;
+       if (flags & S_VERSION)
+               inode_inc_iversion(inode);
+       if (flags & S_CTIME)
+               inode->i_ctime = *time;
+       if (flags & S_MTIME)
+               inode->i_mtime = *time;
+       mark_inode_dirty_sync(inode);
+       return 0;
+}
+
 /**
  *     touch_atime     -       update the access time
- *     @mnt: mount the inode is accessed on
- *     @dentry: dentry accessed
+ *     @path: the &struct path to update
  *
  *     Update the accessed time on an inode and mark it for writeback.
  *     This function automatically handles read only file systems and media,
@@ -1525,12 +1545,83 @@ void touch_atime(struct path *path)
        if (mnt_want_write(mnt))
                return;
 
-       inode->i_atime = now;
-       mark_inode_dirty_sync(inode);
+       /*
+        * File systems can error out when updating inodes if they need to
+        * allocate new space to modify an inode (such is the case for
+        * Btrfs), but since we touch atime while walking down the path we
+        * really don't care if we failed to update the atime of the file,
+        * so just ignore the return value.
+        */
+       update_time(inode, &now, S_ATIME);
        mnt_drop_write(mnt);
 }
 EXPORT_SYMBOL(touch_atime);
 
+/*
+ * The logic we want is
+ *
+ *     if suid or (sgid and xgrp)
+ *             remove privs
+ */
+int should_remove_suid(struct dentry *dentry)
+{
+       umode_t mode = dentry->d_inode->i_mode;
+       int kill = 0;
+
+       /* suid always must be killed */
+       if (unlikely(mode & S_ISUID))
+               kill = ATTR_KILL_SUID;
+
+       /*
+        * sgid without any exec bits is just a mandatory locking mark; leave
+        * it alone.  If some exec bits are set, it's a real sgid; kill it.
+        */
+       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+               kill |= ATTR_KILL_SGID;
+
+       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
+               return kill;
+
+       return 0;
+}
+EXPORT_SYMBOL(should_remove_suid);
+
+static int __remove_suid(struct dentry *dentry, int kill)
+{
+       struct iattr newattrs;
+
+       newattrs.ia_valid = ATTR_FORCE | kill;
+       return notify_change(dentry, &newattrs);
+}
+
+int file_remove_suid(struct file *file)
+{
+       struct dentry *dentry = file->f_path.dentry;
+       struct inode *inode = dentry->d_inode;
+       int killsuid;
+       int killpriv;
+       int error = 0;
+
+       /* Fast path for nothing security related */
+       if (IS_NOSEC(inode))
+               return 0;
+
+       killsuid = should_remove_suid(dentry);
+       killpriv = security_inode_need_killpriv(dentry);
+
+       if (killpriv < 0)
+               return killpriv;
+       if (killpriv)
+               error = security_inode_killpriv(dentry);
+       if (!error && killsuid)
+               error = __remove_suid(dentry, killsuid);
+       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+               inode->i_flags |= S_NOSEC;
+
+       return error;
+}
+EXPORT_SYMBOL(file_remove_suid);
+
 /**
  *     file_update_time        -       update mtime and ctime time
  *     @file: file accessed
@@ -1540,18 +1631,20 @@ EXPORT_SYMBOL(touch_atime);
  *     usage in the file write path of filesystems, and filesystems may
  *     choose to explicitly ignore update via this function with the
  *     S_NOCMTIME inode flag, e.g. for network filesystem where these
- *     timestamps are handled by the server.
+ *     timestamps are handled by the server.  This can return an error for
+ *     file systems who need to allocate space in order to update an inode.
  */
 
-void file_update_time(struct file *file)
+int file_update_time(struct file *file)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
        struct timespec now;
-       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+       int sync_it = 0;
+       int ret;
 
        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
-               return;
+               return 0;
 
        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
@@ -1564,21 +1657,16 @@ void file_update_time(struct file *file)
                sync_it |= S_VERSION;
 
        if (!sync_it)
-               return;
+               return 0;
 
        /* Finally allowed to write? Takes lock. */
        if (mnt_want_write_file(file))
-               return;
+               return 0;
 
-       /* Only change inode inside the lock region */
-       if (sync_it & S_VERSION)
-               inode_inc_iversion(inode);
-       if (sync_it & S_CTIME)
-               inode->i_ctime = now;
-       if (sync_it & S_MTIME)
-               inode->i_mtime = now;
-       mark_inode_dirty_sync(inode);
+       ret = update_time(inode, &now, sync_it);
        mnt_drop_write_file(file);
+
+       return ret;
 }
 EXPORT_SYMBOL(file_update_time);
 
@@ -1748,3 +1836,50 @@ bool inode_owner_or_capable(const struct inode *inode)
        return false;
 }
 EXPORT_SYMBOL(inode_owner_or_capable);
+
+/*
+ * Direct i/o helper functions
+ */
+static void __inode_dio_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+       DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+       do {
+               prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+               if (atomic_read(&inode->i_dio_count))
+                       schedule();
+       } while (atomic_read(&inode->i_dio_count));
+       finish_wait(wq, &q.wait);
+}
+
+/**
+ * inode_dio_wait - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+void inode_dio_wait(struct inode *inode)
+{
+       if (atomic_read(&inode->i_dio_count))
+               __inode_dio_wait(inode);
+}
+EXPORT_SYMBOL(inode_dio_wait);
+
+/*
+ * inode_dio_done - signal finish of a direct I/O requests
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+void inode_dio_done(struct inode *inode)
+{
+       if (atomic_dec_and_test(&inode->i_dio_count))
+               wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+EXPORT_SYMBOL(inode_dio_done);
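
file_update_time() above now accumulates S_MTIME/S_CTIME/S_VERSION bits and hands them to one update_time() helper, which calls the filesystem's ->update_time() hook if it has one and otherwise applies the changes generically (and can now return an error). The sketch below mirrors that collect-flags-then-dispatch shape with simplified types; the names here are illustrative, not the VFS API.

#include <stdio.h>
#include <time.h>

enum { MY_ATIME = 1, MY_MTIME = 2, MY_CTIME = 4, MY_VERSION = 8 };

struct myinode {
        time_t atime, mtime, ctime;
        unsigned long long version;
        /* Optional per-"filesystem" hook, like i_op->update_time. */
        int (*update_time)(struct myinode *, time_t now, int flags);
};

static int generic_update_time(struct myinode *i, time_t now, int flags)
{
        if (flags & MY_ATIME)   i->atime = now;
        if (flags & MY_MTIME)   i->mtime = now;
        if (flags & MY_CTIME)   i->ctime = now;
        if (flags & MY_VERSION) i->version++;
        return 0;               /* a real fs could fail here, e.g. out of space */
}

static int update_time(struct myinode *i, time_t now, int flags)
{
        if (i->update_time)
                return i->update_time(i, now, flags);   /* fs-specific path */
        return generic_update_time(i, now, flags);
}

/* Analogue of file_update_time(): work out which stamps are stale,
 * bail out early if none are, then update them with one call. */
static int file_update_time(struct myinode *i)
{
        time_t now = time(NULL);
        int sync_it = 0;

        if (i->mtime != now) sync_it |= MY_MTIME;
        if (i->ctime != now) sync_it |= MY_CTIME;
        if (!sync_it)
                return 0;
        return update_time(i, now, sync_it);
}

int main(void)
{
        struct myinode ino = { 0 };
        int ret = file_update_time(&ino);
        printf("ret=%d mtime=%lld\n", ret, (long long)ino.mtime);
        return 0;
}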
index 9962c59ba280b1c75d78adc55b8491733075a5e0..18bc216ea09d95ecff126ef96987ff786b5cbcb1 100644 (file)
@@ -56,7 +56,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
 
 extern void __init mnt_init(void);
 
-DECLARE_BRLOCK(vfsmount_lock);
+extern struct lglock vfsmount_lock;
 
 
 /*
@@ -100,6 +100,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
 
 extern long do_handle_open(int mountdirfd,
                           struct file_handle __user *ufh, int open_flag);
+extern int open_check_o_direct(struct file *f);
 
 /*
  * inode.c
index dd4687ff30d09900a14f113aec870007cfcfb7f0..aa4356d09eeeb03167bcf506a7fe8ad98efaba39 100644 (file)
@@ -107,12 +107,11 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
 }
 
 static int
-isofs_export_encode_fh(struct dentry *dentry,
+isofs_export_encode_fh(struct inode *inode,
                       __u32 *fh32,
                       int *max_len,
-                      int connectable)
+                      struct inode *parent)
 {
-       struct inode * inode = dentry->d_inode;
        struct iso_inode_info * ei = ISOFS_I(inode);
        int len = *max_len;
        int type = 1;
@@ -124,7 +123,7 @@ isofs_export_encode_fh(struct dentry *dentry,
         * offset of the inode and the upper 16 bits of fh32[1] to
         * hold the offset of the parent.
         */
-       if (connectable && (len < 5)) {
+       if (parent && (len < 5)) {
                *max_len = 5;
                return 255;
        } else if (len < 3) {
@@ -136,16 +135,12 @@ isofs_export_encode_fh(struct dentry *dentry,
        fh32[0] = ei->i_iget5_block;
        fh16[2] = (__u16)ei->i_iget5_offset;  /* fh16 [sic] */
        fh32[2] = inode->i_generation;
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
+       if (parent) {
                struct iso_inode_info *eparent;
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
                eparent = ISOFS_I(parent);
                fh32[3] = eparent->i_iget5_block;
                fh16[3] = (__u16)eparent->i_iget5_offset;  /* fh16 [sic] */
                fh32[4] = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
                len = 5;
                type = 2;
        }
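
The isofs hunk above adapts export_encode_fh() to the new calling convention: the parent inode is passed in directly, and the handle holds block/offset/generation for the object plus the parent's identifiers when one is supplied, growing from 3 to 5 words and from type 1 to type 2. Below is a rough standalone sketch of that packing and length-negotiation convention; the layout is simplified to whole 32-bit words and all names are invented.

#include <stdint.h>
#include <stdio.h>

/* Toy "inode": just the pieces a file handle needs to find it again. */
struct toy_inode {
        uint32_t block;
        uint16_t offset;
        uint32_t generation;
};

/* Pack an object (and optionally its parent) into a handle of 32-bit
 * words.  Returns a handle "type" (1 = object only, 2 = with parent),
 * or 255 when the caller's buffer is too small, mirroring the
 * convention in the hunk above. */
static int encode_fh(const struct toy_inode *obj, const struct toy_inode *parent,
                     uint32_t *fh, int *max_len)
{
        int need = parent ? 5 : 3;

        if (*max_len < need) {
                *max_len = need;        /* tell the caller how much it needs */
                return 255;
        }

        fh[0] = obj->block;
        fh[1] = obj->offset;
        fh[2] = obj->generation;
        if (parent) {
                fh[3] = parent->block;
                fh[4] = parent->generation;
        }
        *max_len = need;
        return parent ? 2 : 1;
}

int main(void)
{
        struct toy_inode file = { 1234, 56, 7 }, dir = { 1000, 0, 3 };
        uint32_t fh[5];
        int len = 5;
        int type = encode_fh(&file, &dir, fh, &len);

        printf("type=%d len=%d fh0=%u\n", type, len, (unsigned)fh[0]);
        return 0;
}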
index f32f346f4b0a521a5b6bbaedc7b0a5a7750c4b1e..69a48c2944da682c8a133fe75183c086ef08813b 100644 (file)
@@ -1,6 +1,8 @@
 config JBD2
        tristate
        select CRC32
+       select CRYPTO
+       select CRYPTO_CRC32C
        help
          This is a generic journaling layer for block devices that support
          both 32-bit and 64-bit block numbers.  It is currently used by
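
The Kconfig change selects the crc32c crypto driver because the v2 metadata checksums introduced in the following hunks (JBD2_FEATURE_INCOMPAT_CSUM_V2) are CRC32C over journal blocks, obtained through crypto_alloc_shash("crc32c", 0, 0). For reference, a minimal bitwise CRC32C (Castagnoli polynomial, reflected form) is sketched below; it should reproduce the standard "123456789" test vector, but it is only a reference implementation, not how the kernel computes it.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C, reflected polynomial 0x82F63B78.  Slow but tiny;
 * the kernel uses table- or instruction-based code behind the shash. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        const char *msg = "123456789";
        /* Expected CRC32C("123456789") is 0xE3069283. */
        printf("crc32c = 0x%08X\n", (unsigned)crc32c(0, msg, strlen(msg)));
        return 0;
}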
index 840f70f507924a0ac4db70a9d729f715783b49be..216f4299f65e7e2f1e26859c8e1247cdf71c55df 100644 (file)
@@ -85,6 +85,24 @@ nope:
        __brelse(bh);
 }
 
+static void jbd2_commit_block_csum_set(journal_t *j,
+                                      struct journal_head *descriptor)
+{
+       struct commit_header *h;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       h = (struct commit_header *)(jh2bh(descriptor)->b_data);
+       h->h_chksum_type = 0;
+       h->h_chksum_size = 0;
+       h->h_chksum[0] = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       h->h_chksum[0] = cpu_to_be32(csum);
+}
+
 /*
  * Done it all: now submit the commit record.  We should have
  * cleaned up our previous buffers by now, so if we are in abort
@@ -128,6 +146,7 @@ static int journal_submit_commit_record(journal_t *journal,
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }
+       jbd2_commit_block_csum_set(journal, descriptor);
 
        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
@@ -301,6 +320,44 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
+static void jbd2_descr_block_csum_set(journal_t *j,
+                                     struct journal_head *descriptor)
+{
+       struct jbd2_journal_block_tail *tail;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       tail = (struct jbd2_journal_block_tail *)
+                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+                       sizeof(struct jbd2_journal_block_tail));
+       tail->t_checksum = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       tail->t_checksum = cpu_to_be32(csum);
+}
+
+static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+                                   struct buffer_head *bh, __u32 sequence)
+{
+       struct page *page = bh->b_page;
+       __u8 *addr;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       sequence = cpu_to_be32(sequence);
+       addr = kmap_atomic(page, KM_USER0);
+       csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                         sizeof(sequence));
+       csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
+                         bh->b_size);
+       kunmap_atomic(addr, KM_USER0);
+
+       tag->t_checksum = cpu_to_be32(csum);
+}
 /*
  * jbd2_journal_commit_transaction
  *
@@ -334,6 +391,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        unsigned long first_block;
        tid_t first_tid;
        int update_tail;
+       int csum_size = 0;
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               csum_size = sizeof(struct jbd2_journal_block_tail);
 
        /*
         * First job: lock down the current transaction and wait for
@@ -627,7 +688,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
-               tag->t_flags = cpu_to_be32(tag_flag);
+               tag->t_flags = cpu_to_be16(tag_flag);
+               jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
+                                       commit_transaction->t_tid);
                tagp += tag_bytes;
                space_left -= tag_bytes;
 
@@ -643,7 +706,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
-                   space_left < tag_bytes + 16) {
+                   space_left < tag_bytes + 16 + csum_size) {
 
                        jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
 
@@ -651,8 +714,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                            submitting the IOs.  "tag" still points to
                            the last tag we set up. */
 
-                       tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
+                       tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 
+                       jbd2_descr_block_csum_set(journal, descriptor);
 start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
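
jbd2_commit_block_csum_set() and jbd2_descr_block_csum_set() above follow the usual on-disk checksum discipline: zero the checksum field, checksum the whole block, store the result; verification recomputes with the field zeroed again and compares. The sketch below shows that pattern with a stand-in checksum function and a toy block layout, not the real jbd2 structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct block {
        uint8_t  payload[60];
        uint32_t checksum;      /* covers the whole struct, itself zeroed */
};

/* Stand-in for jbd2_chksum(); any checksum over the raw bytes works
 * for showing the zero-then-sum discipline. */
static uint32_t sum32(const void *p, size_t len)
{
        const uint8_t *b = p;
        uint32_t s = 0;
        while (len--)
                s = s * 31 + *b++;
        return s;
}

static void csum_set(struct block *blk)
{
        blk->checksum = 0;                      /* field must not cover itself */
        blk->checksum = sum32(blk, sizeof(*blk));
}

static bool csum_verify(const struct block *blk)
{
        struct block tmp = *blk;
        tmp.checksum = 0;                       /* recompute with field cleared */
        return sum32(&tmp, sizeof(tmp)) == blk->checksum;
}

int main(void)
{
        struct block blk = { .payload = "journal descriptor" };

        csum_set(&blk);
        printf("clean: %d\n", csum_verify(&blk));       /* 1 */
        blk.payload[0] ^= 0xff;                         /* simulate corruption */
        printf("corrupt: %d\n", csum_verify(&blk));     /* 0 */
        return 0;
}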
index 1afb701622b0b17748b4cd7f7d171df139bcc9cc..e9a3c4c85594e30aca1ed1f14d5667ba0595160a 100644 (file)
@@ -97,6 +97,43 @@ EXPORT_SYMBOL(jbd2_inode_cache);
 static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
+/* Checksumming functions */
+int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+}
+
+static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+{
+       __u32 csum, old_csum;
+
+       old_csum = sb->s_checksum;
+       sb->s_checksum = 0;
+       csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
+       sb->s_checksum = old_csum;
+
+       return cpu_to_be32(csum);
+}
+
+int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       return sb->s_checksum == jbd2_superblock_csum(j, sb);
+}
+
+void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       sb->s_checksum = jbd2_superblock_csum(j, sb);
+}
+
 /*
  * Helper function used to manage commit timeouts
  */
@@ -1348,6 +1385,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
        jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
                  journal->j_errno);
        sb->s_errno    = cpu_to_be32(journal->j_errno);
+       jbd2_superblock_csum_set(journal, sb);
        read_unlock(&journal->j_state_lock);
 
        jbd2_write_superblock(journal, WRITE_SYNC);
@@ -1376,6 +1414,9 @@ static int journal_get_superblock(journal_t *journal)
                }
        }
 
+       if (buffer_verified(bh))
+               return 0;
+
        sb = journal->j_superblock;
 
        err = -EINVAL;
@@ -1413,6 +1454,43 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
+       if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+           JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               /* Can't have checksum v1 and v2 on at the same time! */
+               printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+                      "at the same time!\n");
+               goto out;
+       }
+
+       if (!jbd2_verify_csum_type(journal, sb)) {
+               printk(KERN_ERR "JBD: Unknown checksum type\n");
+               goto out;
+       }
+
+       /* Load the checksum driver */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+               if (IS_ERR(journal->j_chksum_driver)) {
+                       printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+                       err = PTR_ERR(journal->j_chksum_driver);
+                       journal->j_chksum_driver = NULL;
+                       goto out;
+               }
+       }
+
+       /* Check superblock checksum */
+       if (!jbd2_superblock_csum_verify(journal, sb)) {
+               printk(KERN_ERR "JBD: journal checksum error\n");
+               goto out;
+       }
+
+       /* Precompute checksum seed for all metadata */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+                                                  sizeof(sb->s_uuid));
+
+       set_buffer_verified(bh);
+
        return 0;
 
 out:
@@ -1564,6 +1642,8 @@ int jbd2_journal_destroy(journal_t *journal)
                iput(journal->j_inode);
        if (journal->j_revoke)
                jbd2_journal_destroy_revoke(journal);
+       if (journal->j_chksum_driver)
+               crypto_free_shash(journal->j_chksum_driver);
        kfree(journal->j_wbuf);
        kfree(journal);
 
@@ -1653,6 +1733,10 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
 int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                          unsigned long ro, unsigned long incompat)
 {
+#define INCOMPAT_FEATURE_ON(f) \
+               ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f)))
+#define COMPAT_FEATURE_ON(f) \
+               ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f)))
        journal_superblock_t *sb;
 
        if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
@@ -1661,16 +1745,54 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
        if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
                return 0;
 
+       /* Asking for checksumming v2 and v1?  Only give them v2. */
+       if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
+           compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+               compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+
        jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
                  compat, ro, incompat);
 
        sb = journal->j_superblock;
 
+       /* If enabling v2 checksums, update superblock */
+       if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+               sb->s_feature_compat &=
+                       ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+
+               /* Load the checksum driver */
+               if (journal->j_chksum_driver == NULL) {
+                       journal->j_chksum_driver = crypto_alloc_shash("crc32c",
+                                                                     0, 0);
+                       if (IS_ERR(journal->j_chksum_driver)) {
+                               printk(KERN_ERR "JBD: Cannot load crc32c "
+                                      "driver.\n");
+                               journal->j_chksum_driver = NULL;
+                               return 0;
+                       }
+               }
+
+               /* Precompute checksum seed for all metadata */
+               if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                             JBD2_FEATURE_INCOMPAT_CSUM_V2))
+                       journal->j_csum_seed = jbd2_chksum(journal, ~0,
+                                                          sb->s_uuid,
+                                                          sizeof(sb->s_uuid));
+       }
+
+       /* If enabling v1 checksums, downgrade superblock */
+       if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+               sb->s_feature_incompat &=
+                       ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
+
        sb->s_feature_compat    |= cpu_to_be32(compat);
        sb->s_feature_ro_compat |= cpu_to_be32(ro);
        sb->s_feature_incompat  |= cpu_to_be32(incompat);
 
        return 1;
+#undef COMPAT_FEATURE_ON
+#undef INCOMPAT_FEATURE_ON
 }
 
 /*
@@ -1975,10 +2097,16 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
  */
 size_t journal_tag_bytes(journal_t *journal)
 {
+       journal_block_tag_t tag;
+       size_t x = 0;
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               x += sizeof(tag.t_checksum);
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-               return JBD2_TAG_SIZE64;
+               return x + JBD2_TAG_SIZE64;
        else
-               return JBD2_TAG_SIZE32;
+               return x + JBD2_TAG_SIZE32;
 }
 
 /*
index c1a03354a22ff1b5a787251b422afcb5225ca2c9..0131e4362534c4d5b83273130ee292463ec49f07 100644 (file)
@@ -174,6 +174,25 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
        return 0;
 }
 
+static int jbd2_descr_block_csum_verify(journal_t *j,
+                                       void *buf)
+{
+       struct jbd2_journal_block_tail *tail;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+                       sizeof(struct jbd2_journal_block_tail));
+       provided = tail->t_checksum;
+       tail->t_checksum = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       tail->t_checksum = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
 
 /*
  * Count the number of in-use tags in a journal descriptor block.
@@ -186,6 +205,9 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
        int                     nr = 0, size = journal->j_blocksize;
        int                     tag_bytes = journal_tag_bytes(journal);
 
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               size -= sizeof(struct jbd2_journal_block_tail);
+
        tagp = &bh->b_data[sizeof(journal_header_t)];
 
        while ((tagp - bh->b_data + tag_bytes) <= size) {
@@ -193,10 +215,10 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
 
                nr++;
                tagp += tag_bytes;
-               if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
+               if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID)))
                        tagp += 16;
 
-               if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
+               if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG))
                        break;
        }
 
@@ -353,6 +375,41 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh,
        return 0;
 }
 
+static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+{
+       struct commit_header *h;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       h = buf;
+       provided = h->h_chksum[0];
+       h->h_chksum[0] = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       h->h_chksum[0] = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
+
+static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+                                     void *buf, __u32 sequence)
+{
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       sequence = cpu_to_be32(sequence);
+       calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                                sizeof(sequence));
+       calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
+       provided = be32_to_cpu(tag->t_checksum);
+
+       return provided == cpu_to_be32(calculated);
+}
+
 static int do_one_pass(journal_t *journal,
                        struct recovery_info *info, enum passtype pass)
 {
@@ -366,6 +423,7 @@ static int do_one_pass(journal_t *journal,
        int                     blocktype;
        int                     tag_bytes = journal_tag_bytes(journal);
        __u32                   crc32_sum = ~0; /* Transactional Checksums */
+       int                     descr_csum_size = 0;
 
        /*
         * First thing is to establish what we expect to find in the log
@@ -451,6 +509,18 @@ static int do_one_pass(journal_t *journal,
 
                switch(blocktype) {
                case JBD2_DESCRIPTOR_BLOCK:
+                       /* Verify checksum first */
+                       if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V2))
+                               descr_csum_size =
+                                       sizeof(struct jbd2_journal_block_tail);
+                       if (descr_csum_size > 0 &&
+                           !jbd2_descr_block_csum_verify(journal,
+                                                         bh->b_data)) {
+                               err = -EIO;
+                               goto failed;
+                       }
+
                        /* If it is a valid descriptor block, replay it
                         * in pass REPLAY; if journal_checksums enabled, then
                         * calculate checksums in PASS_SCAN, otherwise,
@@ -481,11 +551,11 @@ static int do_one_pass(journal_t *journal,
 
                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        while ((tagp - bh->b_data + tag_bytes)
-                              <= journal->j_blocksize) {
+                              <= journal->j_blocksize - descr_csum_size) {
                                unsigned long io_block;
 
                                tag = (journal_block_tag_t *) tagp;
-                               flags = be32_to_cpu(tag->t_flags);
+                               flags = be16_to_cpu(tag->t_flags);
 
                                io_block = next_log_block++;
                                wrap(journal, next_log_block);
@@ -516,6 +586,19 @@ static int do_one_pass(journal_t *journal,
                                                goto skip_write;
                                        }
 
+                                       /* Look for block corruption */
+                                       if (!jbd2_block_tag_csum_verify(
+                                               journal, tag, obh->b_data,
+                                               be32_to_cpu(tmp->h_sequence))) {
+                                               brelse(obh);
+                                               success = -EIO;
+                                               printk(KERN_ERR "JBD: Invalid "
+                                                      "checksum recovering "
+                                                      "block %llu in log\n",
+                                                      blocknr);
+                                               continue;
+                                       }
+
                                        /* Find a buffer for the new
                                         * data being restored */
                                        nbh = __getblk(journal->j_fs_dev,
@@ -650,6 +733,19 @@ static int do_one_pass(journal_t *journal,
                                }
                                crc32_sum = ~0;
                        }
+                       if (pass == PASS_SCAN &&
+                           !jbd2_commit_block_csum_verify(journal,
+                                                          bh->b_data)) {
+                               info->end_transaction = next_commit_ID;
+
+                               if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                    JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
+                                       journal->j_failed_commit =
+                                               next_commit_ID;
+                                       brelse(bh);
+                                       break;
+                               }
+                       }
                        brelse(bh);
                        next_commit_ID++;
                        continue;
@@ -706,6 +802,25 @@ static int do_one_pass(journal_t *journal,
        return err;
 }
 
+static int jbd2_revoke_block_csum_verify(journal_t *j,
+                                        void *buf)
+{
+       struct jbd2_journal_revoke_tail *tail;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+                       sizeof(struct jbd2_journal_revoke_tail));
+       provided = tail->r_checksum;
+       tail->r_checksum = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       tail->r_checksum = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
 
 /* Scan a revoke record, marking all blocks mentioned as revoked. */
 
@@ -720,6 +835,9 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
        offset = sizeof(jbd2_journal_revoke_header_t);
        max = be32_to_cpu(header->r_count);
 
+       if (!jbd2_revoke_block_csum_verify(journal, header))
+               return -EINVAL;
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
                record_len = 8;
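
The jbd2 checksum helpers added above all follow one idiom: zero the on-disk checksum field, run CRC32C over the whole block with a fixed seed, restore the field, and then either compare the result (verify) or store it (set). A minimal userspace sketch of that idiom follows; crc32c(), sb_like and sb_csum() are illustrative stand-ins only, since the kernel goes through the crypto API's crc32c shash, seeds block checksums with the UUID-derived j_csum_seed, and stores the value big-endian, all of which is omitted here.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise, reflected CRC-32C (Castagnoli) - a stand-in for the kernel's
 * crc32c shash.  The caller passes the running value in and gets the
 * updated value back, which is how jbd2_chksum() calls are chained. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78 : 0);
	}
	return crc;
}

/* Hypothetical on-disk structure with an embedded checksum field. */
struct sb_like {
	uint32_t payload[3];
	uint32_t checksum;	/* plays the role of sb->s_checksum */
};

static uint32_t sb_csum(struct sb_like *sb)
{
	uint32_t old = sb->checksum, csum;

	sb->checksum = 0;			/* exclude the field itself */
	csum = crc32c(~0u, sb, sizeof(*sb));	/* checksum the whole struct */
	sb->checksum = old;			/* restore the caller's view */
	return csum;
}

int main(void)
{
	struct sb_like sb = { { 1, 2, 3 }, 0 };

	sb.checksum = sb_csum(&sb);				/* "csum_set" */
	printf("verified: %d\n", sb.checksum == sb_csum(&sb));	/* "csum_verify" -> 1 */
	return 0;
}

Built with a C99 compiler this prints "verified: 1"; flipping any payload byte afterwards makes the second computation disagree with the stored value, which is exactly what jbd2_superblock_csum_verify() relies on.
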
 
index 6973705d6a3d9db1c96ed67f55c97c8a13ee2f6d..f30b80b4ce8bef98cab621bf731e13682661ca6d 100644 (file)
@@ -578,6 +578,7 @@ static void write_one_revoke_record(journal_t *journal,
                                    struct jbd2_revoke_record_s *record,
                                    int write_op)
 {
+       int csum_size = 0;
        struct journal_head *descriptor;
        int offset;
        journal_header_t *header;
@@ -592,9 +593,13 @@ static void write_one_revoke_record(journal_t *journal,
        descriptor = *descriptorp;
        offset = *offsetp;
 
+       /* Do we need to leave space at the end for a checksum? */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               csum_size = sizeof(struct jbd2_journal_revoke_tail);
+
        /* Make sure we have a descriptor with space left for the record */
        if (descriptor) {
-               if (offset == journal->j_blocksize) {
+               if (offset >= journal->j_blocksize - csum_size) {
                        flush_descriptor(journal, descriptor, offset, write_op);
                        descriptor = NULL;
                }
@@ -631,6 +636,24 @@ static void write_one_revoke_record(journal_t *journal,
        *offsetp = offset;
 }
 
+static void jbd2_revoke_csum_set(journal_t *j,
+                                struct journal_head *descriptor)
+{
+       struct jbd2_journal_revoke_tail *tail;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       tail = (struct jbd2_journal_revoke_tail *)
+                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+                       sizeof(struct jbd2_journal_revoke_tail));
+       tail->r_checksum = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       tail->r_checksum = cpu_to_be32(csum);
+}
+
 /*
  * Flush a revoke descriptor out to the journal.  If we are aborting,
  * this is a noop; otherwise we are generating a buffer which needs to
@@ -652,6 +675,8 @@ static void flush_descriptor(journal_t *journal,
 
        header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
        header->r_count = cpu_to_be32(offset);
+       jbd2_revoke_csum_set(journal, descriptor);
+
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
index ddcd3549c6c26cbc9cb9dd46831b189ed3c0441e..fb1ab9533b67277a557cd5f8ea9f7216b8284d4e 100644 (file)
@@ -162,8 +162,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
 
 alloc_transaction:
        if (!journal->j_running_transaction) {
-               new_transaction = kmem_cache_alloc(transaction_cache,
-                                                  gfp_mask | __GFP_ZERO);
+               new_transaction = kmem_cache_zalloc(transaction_cache,
+                                                   gfp_mask);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
index 55a0c1dceadfddcf990b8fdbcfec015fc75fab32..413ef89c2d1ba32fe8507f29355d11c5873bc7d0 100644 (file)
@@ -32,6 +32,13 @@ struct jffs2_inodirty;
 struct jffs2_mount_opts {
        bool override_compr;
        unsigned int compr;
+
+       /* The size of the reserved pool. The reserved pool is the JFFS2 flash
+        * space which may only be used by root and cannot be used by the
+        * other users. This is implemented simply by not allowing the latter
+        * users to write to the file system if the amount of available space
+        * is less than 'rp_size'. */
+       unsigned int rp_size;
 };
 
 /* A struct for the overall file system control.  Pointers to
@@ -126,6 +133,10 @@ struct jffs2_sb_info {
        struct jffs2_inodirty *wbuf_inodes;
        struct rw_semaphore wbuf_sem;   /* Protects the write buffer */
 
+       struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+       int wbuf_queued;                /* non-zero when delayed work is queued */
+       spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and wbuf_queued */
+
        unsigned char *oobbuf;
        int oobavail; /* How many bytes are available for JFFS2 in OOB */
 #endif
index 6784d1e7a7eb3440b7e7707a4659f79e8cec7433..0c96eb52c79783057862a2c8f66106fc2a424113 100644 (file)
 #include "nodelist.h"
 #include "debug.h"
 
+/*
+ * Check whether the user is allowed to write.
+ */
+static int jffs2_rp_can_write(struct jffs2_sb_info *c)
+{
+       uint32_t avail;
+       struct jffs2_mount_opts *opts = &c->mount_opts;
+
+       avail = c->dirty_size + c->free_size + c->unchecked_size +
+               c->erasing_size - c->resv_blocks_write * c->sector_size
+               - c->nospc_dirty_size;
+
+       if (avail < 2 * opts->rp_size)
+               jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
+                         "erasing_size %u, unchecked_size %u, "
+                         "nr_erasing_blocks %u, avail %u, resrv %u\n",
+                         opts->rp_size, c->dirty_size, c->free_size,
+                         c->erasing_size, c->unchecked_size,
+                         c->nr_erasing_blocks, avail, c->nospc_dirty_size);
+
+       if (avail > opts->rp_size)
+               return 1;
+
+       /* Always allow root */
+       if (capable(CAP_SYS_RESOURCE))
+               return 1;
+
+       jffs2_dbg(1, "forbid writing\n");
+       return 0;
+}
+
 /**
  *     jffs2_reserve_space - request physical space to write nodes to flash
  *     @c: superblock info
@@ -55,6 +86,15 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 
        spin_lock(&c->erase_completion_lock);
 
+       /*
+        * Check if the free space is greater than the size of the reserved pool.
+        * If not, only allow root to proceed with writing.
+        */
+       if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
        /* this needs a little more thought (true <tglx> :)) */
        while(ret == -EAGAIN) {
                while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
@@ -158,6 +198,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
                }
        }
+
+out:
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
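
To make the reserved-pool check above concrete: an ordinary writer is refused once the writable space drops to rp_size or below, while a caller with CAP_SYS_RESOURCE is always allowed through. A tiny standalone sketch of just that decision is shown below; rp_can_write() and its arguments are hypothetical, and the real availability figure is the dirty/free/unchecked/erasing sum minus the write reserve computed in jffs2_rp_can_write().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the decision made by jffs2_rp_can_write():
 * unprivileged writers may proceed only while the writable space stays
 * above the reserved pool; a privileged caller is always allowed. */
static bool rp_can_write(uint32_t avail, uint32_t rp_size, bool privileged)
{
	if (avail > rp_size)
		return true;
	return privileged;	/* stand-in for capable(CAP_SYS_RESOURCE) */
}

int main(void)
{
	printf("%d %d %d\n",
	       rp_can_write(4096, 1024, false),	/* plenty of space  -> 1 */
	       rp_can_write(512, 1024, false),	/* below pool, user -> 0 */
	       rp_can_write(512, 1024, true));	/* below pool, root -> 1 */
	return 0;
}
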
index 1cd3aec9d9ae282dd31226d0717aaf69a55f414d..bcd983d7e7f99e7e295decc1d26092d464a14d9f 100644 (file)
@@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 #define jffs2_ubivol(c) (0)
 #define jffs2_ubivol_setup(c) (0)
 #define jffs2_ubivol_cleanup(c) do {} while (0)
+#define jffs2_dirty_trigger(c) do {} while (0)
 
 #else /* NAND and/or ECC'd NOR support present */
 
@@ -135,14 +136,10 @@ void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
 #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+void jffs2_dirty_trigger(struct jffs2_sb_info *c);
 
 #endif /* WRITEBUFFER */
 
-static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
-{
-       OFNI_BS_2SFFJ(c)->s_dirt = 1;
-}
-
 /* background.c */
 int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
 void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
index dc0437e8476322aaff40dc01737dcc2cabdc6976..1ea349fff68b625389c5647f03094ef56f5e4262 100644 (file)
@@ -1266,19 +1266,25 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                        /* Symlink's inode data is the target path. Read it and
                         * keep in RAM to facilitate quick follow symlink
                         * operation. */
-                       f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
+                       uint32_t csize = je32_to_cpu(latest_node->csize);
+                       if (csize > JFFS2_MAX_NAME_LEN) {
+                               mutex_unlock(&f->sem);
+                               jffs2_do_clear_inode(c, f);
+                               return -ENAMETOOLONG;
+                       }
+                       f->target = kmalloc(csize + 1, GFP_KERNEL);
                        if (!f->target) {
-                               JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
+                               JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize);
                                mutex_unlock(&f->sem);
                                jffs2_do_clear_inode(c, f);
                                return -ENOMEM;
                        }
 
                        ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
-                                               je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
+                                              csize, &retlen, (char *)f->target);
 
-                       if (ret  || retlen != je32_to_cpu(latest_node->csize)) {
-                               if (retlen != je32_to_cpu(latest_node->csize))
+                       if (ret || retlen != csize) {
+                               if (retlen != csize)
                                        ret = -EIO;
                                kfree(f->target);
                                f->target = NULL;
@@ -1287,7 +1293,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                                return ret;
                        }
 
-                       f->target[je32_to_cpu(latest_node->csize)] = '\0';
+                       f->target[csize] = '\0';
                        dbg_readinode("symlink's target '%s' cached\n", f->target);
                }
 
@@ -1415,6 +1421,7 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
                mutex_unlock(&f->sem);
                jffs2_do_clear_inode(c, f);
        }
+       jffs2_xattr_do_crccheck_inode(c, ic);
        kfree (f);
        return ret;
 }
index f9916f312bd81e3590fde1c92a025458cb64ab11..61ea41389f90d91d8b3ab6a6a39cd580720f1950 100644 (file)
@@ -63,21 +63,6 @@ static void jffs2_i_init_once(void *foo)
        inode_init_once(&f->vfs_inode);
 }
 
-static void jffs2_write_super(struct super_block *sb)
-{
-       struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-
-       lock_super(sb);
-       sb->s_dirt = 0;
-
-       if (!(sb->s_flags & MS_RDONLY)) {
-               jffs2_dbg(1, "%s()\n", __func__);
-               jffs2_flush_wbuf_gc(c, 0);
-       }
-
-       unlock_super(sb);
-}
-
 static const char *jffs2_compr_name(unsigned int compr)
 {
        switch (compr) {
@@ -105,6 +90,8 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
 
        if (opts->override_compr)
                seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
+       if (opts->rp_size)
+               seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
 
        return 0;
 }
@@ -113,8 +100,6 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
 {
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-       jffs2_write_super(sb);
-
        mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
        mutex_unlock(&c->alloc_sem);
@@ -171,15 +156,18 @@ static const struct export_operations jffs2_export_ops = {
  * JFFS2 mount options.
  *
  * Opt_override_compr: override default compressor
+ * Opt_rp_size: size of reserved pool in KiB
  * Opt_err: just end of array marker
  */
 enum {
        Opt_override_compr,
+       Opt_rp_size,
        Opt_err,
 };
 
 static const match_table_t tokens = {
        {Opt_override_compr, "compr=%s"},
+       {Opt_rp_size, "rp_size=%u"},
        {Opt_err, NULL},
 };
 
@@ -187,6 +175,7 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
 {
        substring_t args[MAX_OPT_ARGS];
        char *p, *name;
+       unsigned int opt;
 
        if (!data)
                return 0;
@@ -224,6 +213,17 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
                        kfree(name);
                        c->mount_opts.override_compr = true;
                        break;
+               case Opt_rp_size:
+                       if (match_int(&args[0], &opt))
+                               return -EINVAL;
+                       opt *= 1024;
+                       if (opt > c->mtd->size) {
+                               pr_warn("Too large reserve pool specified, max "
+                                       "is %llu KB\n", c->mtd->size / 1024);
+                               return -EINVAL;
+                       }
+                       c->mount_opts.rp_size = opt;
+                       break;
                default:
                        pr_err("Error: unrecognized mount option '%s' or missing value\n",
                               p);
@@ -251,7 +251,6 @@ static const struct super_operations jffs2_super_operations =
        .alloc_inode =  jffs2_alloc_inode,
        .destroy_inode =jffs2_destroy_inode,
        .put_super =    jffs2_put_super,
-       .write_super =  jffs2_write_super,
        .statfs =       jffs2_statfs,
        .remount_fs =   jffs2_remount_fs,
        .evict_inode =  jffs2_evict_inode,
@@ -319,9 +318,6 @@ static void jffs2_put_super (struct super_block *sb)
 
        jffs2_dbg(2, "%s()\n", __func__);
 
-       if (sb->s_dirt)
-               jffs2_write_super(sb);
-
        mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
        mutex_unlock(&c->alloc_sem);
index 74d9be19df3f1fff1d7defdc7824c90240a302f6..6f4529d3697fd3f97d5b018dbe9f5c0362cee034 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
+#include <linux/writeback.h>
 
 #include "nodelist.h"
 
@@ -85,7 +86,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
 {
        struct jffs2_inodirty *new;
 
-       /* Mark the superblock dirty so that kupdated will flush... */
+       /* Schedule delayed write-buffer write-out */
        jffs2_dirty_trigger(c);
 
        if (jffs2_wbuf_pending_for_ino(c, ino))
@@ -1148,6 +1149,47 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
        return 1;
 }
 
+static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+
+       dwork = container_of(work, struct delayed_work, work);
+       return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
+}
+
+static void delayed_wbuf_sync(struct work_struct *work)
+{
+       struct jffs2_sb_info *c = work_to_sb(work);
+       struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+       spin_lock(&c->wbuf_dwork_lock);
+       c->wbuf_queued = 0;
+       spin_unlock(&c->wbuf_dwork_lock);
+
+       if (!(sb->s_flags & MS_RDONLY)) {
+               jffs2_dbg(1, "%s()\n", __func__);
+               jffs2_flush_wbuf_gc(c, 0);
+       }
+}
+
+void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+{
+       struct super_block *sb = OFNI_BS_2SFFJ(c);
+       unsigned long delay;
+
+       if (sb->s_flags & MS_RDONLY)
+               return;
+
+       spin_lock(&c->wbuf_dwork_lock);
+       if (!c->wbuf_queued) {
+               jffs2_dbg(1, "%s()\n", __func__);
+               delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+               queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+               c->wbuf_queued = 1;
+       }
+       spin_unlock(&c->wbuf_dwork_lock);
+}
+
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 {
        struct nand_ecclayout *oinfo = c->mtd->ecclayout;
@@ -1169,6 +1211,8 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 
        /* Initialise write buffer */
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize = c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1207,8 +1251,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
-
-
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize =  c->mtd->erasesize;
 
        /* Find a suitable c->sector_size
@@ -1267,6 +1311,9 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
        c->wbuf_pagesize = c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1299,6 +1346,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
                return 0;
 
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
        c->wbuf_pagesize =  c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
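
The write-buffer changes above replace the old s_dirt/write_super flushing with a self-arming delayed work item: the first caller to dirty the write buffer queues the work and sets a flag under a spinlock, later callers see the flag and return, and the handler clears the flag before flushing. A stripped-down kernel-style sketch of that coalescing pattern (initialisation with spin_lock_init()/INIT_DELAYED_WORK() omitted, names hypothetical) might look like:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Sketch only: coalesce many "dirty" events into one delayed flush,
 * in the same way wbuf_dwork/wbuf_queued are used above. */
struct flusher {
	spinlock_t lock;
	int queued;			/* non-zero while work is pending */
	struct delayed_work dwork;	/* initialised with flusher_work() */
};

static void flusher_work(struct work_struct *work)
{
	struct flusher *f = container_of(to_delayed_work(work),
					 struct flusher, dwork);

	spin_lock(&f->lock);
	f->queued = 0;			/* let the next trigger re-arm */
	spin_unlock(&f->lock);

	/* ... perform the actual write-out here ... */
}

static void flusher_trigger(struct flusher *f, unsigned long delay)
{
	spin_lock(&f->lock);
	if (!f->queued) {		/* only the first dirtier queues work */
		queue_delayed_work(system_long_wq, &f->dwork, delay);
		f->queued = 1;
	}
	spin_unlock(&f->lock);
}
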
index b55b803eddcb92081908aa1f53da50cbdfc6babb..3034e970eb9a130cea79d7d413aa2463070408ff 100644 (file)
@@ -11,6 +11,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#define JFFS2_XATTR_IS_CORRUPTED       1
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -153,7 +155,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
                JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
                            offset, je32_to_cpu(rx.hdr_crc), crc);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
        if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -169,7 +171,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
                            je32_to_cpu(rx.xid), xd->xid,
                            je32_to_cpu(rx.version), xd->version);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        xd->xprefix = rx.xprefix;
        xd->name_len = rx.name_len;
@@ -227,12 +229,12 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
        data[xd->name_len] = '\0';
        crc = crc32(0, data, length);
        if (crc != xd->data_crc) {
-               JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)"
+               JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)"
                              " at %#08x, read: 0x%08x calculated: 0x%08x\n",
                              ref_offset(xd->node), xd->data_crc, crc);
                kfree(data);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
 
        xd->flags |= JFFS2_XFLAGS_HOT;
@@ -270,7 +272,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
        if (xd->xname)
                return 0;
        if (xd->flags & JFFS2_XFLAGS_INVALID)
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        if (unlikely(is_xattr_datum_unchecked(c, xd)))
                rc = do_verify_xattr_datum(c, xd);
        if (!rc)
@@ -435,6 +437,8 @@ static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datu
  *   is called to release xattr related objects when unmounting. 
  * check_xattr_ref_inode(c, ic)
  *   is used to confirm inode does not have duplicate xattr name/value pair.
+ * jffs2_xattr_do_crccheck_inode(c, ic)
+ *   is used to force an xattr data integrity check during the initial gc scan.
  * -------------------------------------------------- */
 static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
 {
@@ -462,7 +466,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
        if (crc != je32_to_cpu(rr.node_crc)) {
                JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
                            offset, je32_to_cpu(rr.node_crc), crc);
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
            || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -472,7 +476,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
                            offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
                            je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
                            je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        ref->ino = je32_to_cpu(rr.ino);
        ref->xid = je32_to_cpu(rr.xid);
@@ -682,6 +686,11 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
        return rc;
 }
 
+void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+       check_xattr_ref_inode(c, ic);
+}
+
 /* -------- xattr subsystem functions ---------------
  * jffs2_init_xattr_subsystem(c)
  *   is used to initialize semaphore and list_head, and some variables.
index 7be4beb306f3ff06eb6df2a8427ca31cb2027e6f..467ff376ee265041b40d94d30ee73c6b7edae4aa 100644 (file)
@@ -77,6 +77,7 @@ extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
 extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
                                                         uint32_t xid, uint32_t version);
 
+extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 
@@ -108,6 +109,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
 #define jffs2_build_xattr_subsystem(c)
 #define jffs2_clear_xattr_subsystem(c)
 
+#define jffs2_xattr_do_crccheck_inode(c, ic)
 #define jffs2_xattr_delete_inode(c, ic)
 #define jffs2_xattr_free_inode(c, ic)
 #define jffs2_verify_xattr(c)                  (1)
index ba1dc2eebd1ef8413d0593abfde9e14229169ab3..ca0a08001449a999e7070253ba51f7607291285e 100644 (file)
@@ -56,7 +56,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
        u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
        int status;
 
-       status = lockd_up();
+       status = lockd_up(nlm_init->net);
        if (status < 0)
                return ERR_PTR(status);
 
@@ -65,7 +65,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
                                   nlm_init->hostname, nlm_init->noresvport,
                                   nlm_init->net);
        if (host == NULL) {
-               lockd_down();
+               lockd_down(nlm_init->net);
                return ERR_PTR(-ENOLCK);
        }
 
@@ -80,8 +80,10 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
  */
 void nlmclnt_done(struct nlm_host *host)
 {
+       struct net *net = host->net;
+
        nlmclnt_release_host(host);
-       lockd_down();
+       lockd_down(net);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
 
@@ -220,11 +222,12 @@ reclaimer(void *ptr)
        struct nlm_wait   *block;
        struct file_lock *fl, *next;
        u32 nsmstate;
+       struct net *net = host->net;
 
        allow_signal(SIGKILL);
 
        down_write(&host->h_rwsem);
-       lockd_up();     /* note: this cannot fail as lockd is already running */
+       lockd_up(net);  /* note: this cannot fail as lockd is already running */
 
        dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
 
@@ -275,6 +278,6 @@ restart:
 
        /* Release host handle after use */
        nlmclnt_release_host(host);
-       lockd_down();
+       lockd_down(net);
        return 0;
 }
index f49b9afc443690a2377db100ed7da33452ef98db..80938fda67e0e6fde67999d3556820b87b6acd33 100644 (file)
@@ -251,39 +251,40 @@ out_err:
        return err;
 }
 
-static int lockd_up_net(struct net *net)
+static int lockd_up_net(struct svc_serv *serv, struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct svc_serv *serv = nlmsvc_rqst->rq_server;
        int error;
 
-       if (ln->nlmsvc_users)
+       if (ln->nlmsvc_users++)
                return 0;
 
-       error = svc_rpcb_setup(serv, net);
+       error = svc_bind(serv, net);
        if (error)
-               goto err_rpcb;
+               goto err_bind;
 
        error = make_socks(serv, net);
        if (error < 0)
                goto err_socks;
+       dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
 err_socks:
        svc_rpcb_cleanup(serv, net);
-err_rpcb:
+err_bind:
+       ln->nlmsvc_users--;
        return error;
 }
 
-static void lockd_down_net(struct net *net)
+static void lockd_down_net(struct svc_serv *serv, struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct svc_serv *serv = nlmsvc_rqst->rq_server;
 
        if (ln->nlmsvc_users) {
                if (--ln->nlmsvc_users == 0) {
                        nlm_shutdown_hosts_net(net);
                        svc_shutdown_net(serv, net);
+                       dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
                }
        } else {
                printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
@@ -292,22 +293,60 @@ static void lockd_down_net(struct net *net)
        }
 }
 
-/*
- * Bring up the lockd process if it's not already up.
- */
-int lockd_up(void)
+static int lockd_start_svc(struct svc_serv *serv)
+{
+       int error;
+
+       if (nlmsvc_rqst)
+               return 0;
+
+       /*
+        * Create the kernel thread and wait for it to start.
+        */
+       nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
+       if (IS_ERR(nlmsvc_rqst)) {
+               error = PTR_ERR(nlmsvc_rqst);
+               printk(KERN_WARNING
+                       "lockd_up: svc_rqst allocation failed, error=%d\n",
+                       error);
+               goto out_rqst;
+       }
+
+       svc_sock_update_bufs(serv);
+       serv->sv_maxconn = nlm_max_connections;
+
+       nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
+       if (IS_ERR(nlmsvc_task)) {
+               error = PTR_ERR(nlmsvc_task);
+               printk(KERN_WARNING
+                       "lockd_up: kthread_run failed, error=%d\n", error);
+               goto out_task;
+       }
+       dprintk("lockd_up: service started\n");
+       return 0;
+
+out_task:
+       svc_exit_thread(nlmsvc_rqst);
+       nlmsvc_task = NULL;
+out_rqst:
+       nlmsvc_rqst = NULL;
+       return error;
+}
+
+static struct svc_serv *lockd_create_svc(void)
 {
        struct svc_serv *serv;
-       int             error = 0;
-       struct net *net = current->nsproxy->net_ns;
 
-       mutex_lock(&nlmsvc_mutex);
        /*
         * Check whether we're already up and running.
         */
        if (nlmsvc_rqst) {
-               error = lockd_up_net(net);
-               goto out;
+               /*
+                * Note: take an extra reference on the service here, because
+                * svc_destroy() will be called later even on error paths.
+                */
+               svc_get(nlmsvc_rqst->rq_server);
+               return nlmsvc_rqst->rq_server;
        }
 
        /*
@@ -318,59 +357,53 @@ int lockd_up(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
-       error = -ENOMEM;
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
-               goto out;
+               return ERR_PTR(-ENOMEM);
        }
+       dprintk("lockd_up: service created\n");
+       return serv;
+}
 
-       error = make_socks(serv, net);
-       if (error < 0)
-               goto destroy_and_out;
+/*
+ * Bring up the lockd process if it's not already up.
+ */
+int lockd_up(struct net *net)
+{
+       struct svc_serv *serv;
+       int error;
 
-       /*
-        * Create the kernel thread and wait for it to start.
-        */
-       nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-       if (IS_ERR(nlmsvc_rqst)) {
-               error = PTR_ERR(nlmsvc_rqst);
-               nlmsvc_rqst = NULL;
-               printk(KERN_WARNING
-                       "lockd_up: svc_rqst allocation failed, error=%d\n",
-                       error);
-               goto destroy_and_out;
+       mutex_lock(&nlmsvc_mutex);
+
+       serv = lockd_create_svc();
+       if (IS_ERR(serv)) {
+               error = PTR_ERR(serv);
+               goto err_create;
        }
 
-       svc_sock_update_bufs(serv);
-       serv->sv_maxconn = nlm_max_connections;
+       error = lockd_up_net(serv, net);
+       if (error < 0)
+               goto err_net;
 
-       nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
-       if (IS_ERR(nlmsvc_task)) {
-               error = PTR_ERR(nlmsvc_task);
-               svc_exit_thread(nlmsvc_rqst);
-               nlmsvc_task = NULL;
-               nlmsvc_rqst = NULL;
-               printk(KERN_WARNING
-                       "lockd_up: kthread_run failed, error=%d\n", error);
-               goto destroy_and_out;
-       }
+       error = lockd_start_svc(serv);
+       if (error < 0)
+               goto err_start;
 
+       nlmsvc_users++;
        /*
         * Note: svc_serv structures have an initial use count of 1,
         * so we exit through here on both success and failure.
         */
-destroy_and_out:
+err_net:
        svc_destroy(serv);
-out:
-       if (!error) {
-               struct lockd_net *ln = net_generic(net, lockd_net_id);
-
-               ln->nlmsvc_users++;
-               nlmsvc_users++;
-       }
+err_create:
        mutex_unlock(&nlmsvc_mutex);
        return error;
+
+err_start:
+       lockd_down_net(serv, net);
+       goto err_net;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
@@ -378,14 +411,13 @@ EXPORT_SYMBOL_GPL(lockd_up);
  * Decrement the user count and bring down lockd if we're the last.
  */
 void
-lockd_down(void)
+lockd_down(struct net *net)
 {
        mutex_lock(&nlmsvc_mutex);
+       lockd_down_net(nlmsvc_rqst->rq_server, net);
        if (nlmsvc_users) {
-               if (--nlmsvc_users) {
-                       lockd_down_net(current->nsproxy->net_ns);
+               if (--nlmsvc_users)
                        goto out;
-               }
        } else {
                printk(KERN_ERR "lockd_down: no users! task=%p\n",
                        nlmsvc_task);
@@ -397,7 +429,9 @@ lockd_down(void)
                BUG();
        }
        kthread_stop(nlmsvc_task);
+       dprintk("lockd_down: service stopped\n");
        svc_exit_thread(nlmsvc_rqst);
+       dprintk("lockd_down: service destroyed\n");
        nlmsvc_task = NULL;
        nlmsvc_rqst = NULL;
 out:
index 4f441e46cef47bc67b08a3e82b78f389dfbbf818..fce6238d52c1bf742307325a588d545e169508e4 100644 (file)
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        case F_WRLCK:
                return generic_add_lease(filp, arg, flp);
        default:
-               BUG();
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1636,12 +1636,13 @@ EXPORT_SYMBOL(flock_lock_file_wait);
 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
        struct file *filp;
+       int fput_needed;
        struct file_lock *lock;
        int can_sleep, unlock;
        int error;
 
        error = -EBADF;
-       filp = fget(fd);
+       filp = fget_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
@@ -1674,7 +1675,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
        locks_free_lock(lock);
 
  out_putf:
-       fput(filp);
+       fput_light(filp, fput_needed);
  out:
        return error;
 }
index c651f02c9fecb930c97a2668adc075090c04b7c9..7d694194024ac4d2459e7cc3d60014bdff64e3ba 100644 (file)
@@ -449,7 +449,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
        mntget(nd->path.mnt);
 
        rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        nd->flags &= ~LOOKUP_RCU;
        return 0;
 
@@ -507,14 +507,14 @@ static int complete_walk(struct nameidata *nd)
                if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
                        spin_unlock(&dentry->d_lock);
                        rcu_read_unlock();
-                       br_read_unlock(vfsmount_lock);
+                       br_read_unlock(&vfsmount_lock);
                        return -ECHILD;
                }
                BUG_ON(nd->inode != dentry->d_inode);
                spin_unlock(&dentry->d_lock);
                mntget(nd->path.mnt);
                rcu_read_unlock();
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 
        if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -681,15 +681,15 @@ int follow_up(struct path *path)
        struct mount *parent;
        struct dentry *mountpoint;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        parent = mnt->mnt_parent;
        if (&parent->mnt == path->mnt) {
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return 0;
        }
        mntget(&parent->mnt);
        mountpoint = dget(mnt->mnt_mountpoint);
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        dput(path->dentry);
        path->dentry = mountpoint;
        mntput(path->mnt);
@@ -947,7 +947,7 @@ failed:
        if (!(nd->flags & LOOKUP_ROOT))
                nd->root.mnt = NULL;
        rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return -ECHILD;
 }
 
@@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int do_lookup(struct nameidata *nd, struct qstr *name,
-                       struct path *path, struct inode **inode)
+static int lookup_fast(struct nameidata *nd, struct qstr *name,
+                      struct path *path, struct inode **inode)
 {
        struct vfsmount *mnt = nd->path.mnt;
        struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@ unlazy:
                        goto need_lookup;
                }
        }
-done:
+
        path->mnt = mnt;
        path->dentry = dentry;
        err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@ done:
        return 0;
 
 need_lookup:
+       return 1;
+}
+
+/* Fast lookup failed, do it the slow way */
+static int lookup_slow(struct nameidata *nd, struct qstr *name,
+                      struct path *path)
+{
+       struct dentry *dentry, *parent;
+       int err;
+
+       parent = nd->path.dentry;
        BUG_ON(nd->inode != parent->d_inode);
 
        mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@ need_lookup:
        mutex_unlock(&parent->d_inode->i_mutex);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
-       goto done;
+       path->mnt = nd->path.mnt;
+       path->dentry = dentry;
+       err = follow_managed(path, nd->flags);
+       if (unlikely(err < 0)) {
+               path_put_conditional(path, nd);
+               return err;
+       }
+       if (err)
+               nd->flags |= LOOKUP_JUMPED;
+       return 0;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1265,7 +1285,7 @@ static void terminate_walk(struct nameidata *nd)
                if (!(nd->flags & LOOKUP_ROOT))
                        nd->root.mnt = NULL;
                rcu_read_unlock();
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 }
 
@@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
         */
        if (unlikely(type != LAST_NORM))
                return handle_dots(nd, type);
-       err = do_lookup(nd, name, path, &inode);
+       err = lookup_fast(nd, name, path, &inode);
        if (unlikely(err)) {
-               terminate_walk(nd);
-               return err;
-       }
-       if (!inode) {
-               path_to_nameidata(path, nd);
-               terminate_walk(nd);
-               return -ENOENT;
+               if (err < 0)
+                       goto out_err;
+
+               err = lookup_slow(nd, name, path);
+               if (err < 0)
+                       goto out_err;
+
+               inode = path->dentry->d_inode;
        }
+       err = -ENOENT;
+       if (!inode)
+               goto out_path_put;
+
        if (should_follow_link(inode, follow)) {
                if (nd->flags & LOOKUP_RCU) {
                        if (unlikely(unlazy_walk(nd, path->dentry))) {
-                               terminate_walk(nd);
-                               return -ECHILD;
+                               err = -ECHILD;
+                               goto out_err;
                        }
                }
                BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
        path_to_nameidata(path, nd);
        nd->inode = inode;
        return 0;
+
+out_path_put:
+       path_to_nameidata(path, nd);
+out_err:
+       terminate_walk(nd);
+       return err;
 }
 
 /*
@@ -1620,7 +1651,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                nd->path = nd->root;
                nd->inode = inode;
                if (flags & LOOKUP_RCU) {
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                } else {
@@ -1633,7 +1664,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
 
        if (*name=='/') {
                if (flags & LOOKUP_RCU) {
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                        set_root_rcu(nd);
                } else {
@@ -1646,7 +1677,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                        struct fs_struct *fs = current->fs;
                        unsigned seq;
 
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
 
                        do {
@@ -1682,7 +1713,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                        if (fput_needed)
                                *fp = file;
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                } else {
                        path_get(&file->f_path);
@@ -2169,6 +2200,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        int want_write = 0;
        int acc_mode = op->acc_mode;
        struct file *filp;
+       struct inode *inode;
+       int symlink_ok = 0;
+       struct path save_parent = { .dentry = NULL, .mnt = NULL };
+       bool retried = false;
        int error;
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -2200,30 +2235,23 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        }
 
        if (!(open_flag & O_CREAT)) {
-               int symlink_ok = 0;
                if (nd->last.name[nd->last.len])
                        nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
                if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
                        symlink_ok = 1;
                /* we _can_ be in RCU mode here */
-               error = walk_component(nd, path, &nd->last, LAST_NORM,
-                                       !symlink_ok);
-               if (error < 0)
-                       return ERR_PTR(error);
-               if (error) /* symlink */
-                       return NULL;
-               /* sayonara */
-               error = complete_walk(nd);
-               if (error)
-                       return ERR_PTR(error);
+               error = lookup_fast(nd, &nd->last, path, &inode);
+               if (unlikely(error)) {
+                       if (error < 0)
+                               goto exit;
 
-               error = -ENOTDIR;
-               if (nd->flags & LOOKUP_DIRECTORY) {
-                       if (!nd->inode->i_op->lookup)
+                       error = lookup_slow(nd, &nd->last, path);
+                       if (error < 0)
                                goto exit;
+
+                       inode = path->dentry->d_inode;
                }
-               audit_inode(pathname, nd->path.dentry);
-               goto ok;
+               goto finish_lookup;
        }
 
        /* create side of things */
@@ -2241,6 +2269,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (nd->last.name[nd->last.len])
                goto exit;
 
+retry_lookup:
        mutex_lock(&dir->d_inode->i_mutex);
 
        dentry = lookup_hash(nd);
@@ -2302,22 +2331,49 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (error)
                nd->flags |= LOOKUP_JUMPED;
 
+       BUG_ON(nd->flags & LOOKUP_RCU);
+       inode = path->dentry->d_inode;
+finish_lookup:
+       /* we _can_ be in RCU mode here */
        error = -ENOENT;
-       if (!path->dentry->d_inode)
-               goto exit_dput;
+       if (!inode) {
+               path_to_nameidata(path, nd);
+               goto exit;
+       }
 
-       if (path->dentry->d_inode->i_op->follow_link)
+       if (should_follow_link(inode, !symlink_ok)) {
+               if (nd->flags & LOOKUP_RCU) {
+                       if (unlikely(unlazy_walk(nd, path->dentry))) {
+                               error = -ECHILD;
+                               goto exit;
+                       }
+               }
+               BUG_ON(inode != path->dentry->d_inode);
                return NULL;
+       }
 
-       path_to_nameidata(path, nd);
-       nd->inode = path->dentry->d_inode;
+       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
+               path_to_nameidata(path, nd);
+       } else {
+               save_parent.dentry = nd->path.dentry;
+               save_parent.mnt = mntget(path->mnt);
+               nd->path.dentry = path->dentry;
+
+       }
+       nd->inode = inode;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
        error = complete_walk(nd);
-       if (error)
+       if (error) {
+               path_put(&save_parent);
                return ERR_PTR(error);
+       }
        error = -EISDIR;
-       if (S_ISDIR(nd->inode->i_mode))
+       if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
+               goto exit;
+       error = -ENOTDIR;
+       if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
                goto exit;
+       audit_inode(pathname, nd->path.dentry);
 ok:
        if (!S_ISREG(nd->inode->i_mode))
                will_truncate = 0;
@@ -2333,6 +2389,20 @@ common:
        if (error)
                goto exit;
        filp = nameidata_to_filp(nd);
+       if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
+               BUG_ON(save_parent.dentry != dir);
+               path_put(&nd->path);
+               nd->path = save_parent;
+               nd->inode = dir->d_inode;
+               save_parent.mnt = NULL;
+               save_parent.dentry = NULL;
+               if (want_write) {
+                       mnt_drop_write(nd->path.mnt);
+                       want_write = 0;
+               }
+               retried = true;
+               goto retry_lookup;
+       }
        if (!IS_ERR(filp)) {
                error = ima_file_check(filp, op->acc_mode);
                if (error) {
@@ -2352,7 +2422,8 @@ common:
 out:
        if (want_write)
                mnt_drop_write(nd->path.mnt);
-       path_put(&nd->path);
+       path_put(&save_parent);
+       terminate_walk(nd);
        return filp;
 
 exit_mutex_unlock:
@@ -2415,6 +2486,12 @@ out:
        if (base)
                fput(base);
        release_open_intent(nd);
+       if (filp == ERR_PTR(-EOPENSTALE)) {
+               if (flags & LOOKUP_RCU)
+                       filp = ERR_PTR(-ECHILD);
+               else
+                       filp = ERR_PTR(-ESTALE);
+       }
        return filp;
 
 out_filp:
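
The do_last() hunks above add a one-shot recovery path: the parent directory is remembered in save_parent, and when the final open comes back with the kernel-internal -EOPENSTALE (a cached dentry that turned out to be stale, as the NFSv4 open path later in this section reports), the walk steps back to that saved parent and jumps to retry_lookup exactly once before the error is turned into -ESTALE or -ECHILD. A minimal user-space sketch of that retry-once shape; try_open() is an invented stand-in for the lookup-plus-open sequence:

    /* Illustrative sketch only, not kernel code: try_open() stands in for
     * the lookup + open work that the patch retries once from the saved
     * parent after a stale cached dentry. */
    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    static int try_open(const char *dir, const char *name, bool fresh)
    {
            /* Pretend the first attempt hits a stale cached entry. */
            (void)dir; (void)name;
            return fresh ? 0 : -ESTALE;
    }

    static int open_last_component(const char *dir, const char *name)
    {
            bool retried = false;
            int err;

    retry_lookup:
            err = try_open(dir, name, retried);
            if (err == -ESTALE && !retried) {
                    /* Drop the stale state, return to the parent, try again. */
                    retried = true;
                    goto retry_lookup;
            }
            return err;
    }

    int main(void)
    {
            printf("open -> %d\n", open_last_component("/mnt/nfs", "file"));
            return 0;
    }
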
index e6081996c9a2f9d26525740545445630c4737583..1e4a5fe3d7b7f789d66839f37b1f917c1fa3e2ba 100644 (file)
@@ -397,7 +397,7 @@ static int mnt_make_readonly(struct mount *mnt)
 {
        int ret = 0;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +431,15 @@ static int mnt_make_readonly(struct mount *mnt)
         */
        smp_wmb();
        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt->mnt.mnt_flags &= ~MNT_READONLY;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +451,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
        if (atomic_long_read(&sb->s_remove_count))
                return -EBUSY;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
                if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
                        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +473,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
                if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
                        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        return err;
 }
@@ -522,14 +522,14 @@ struct vfsmount *lookup_mnt(struct path *path)
 {
        struct mount *child_mnt;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
        if (child_mnt) {
                mnt_add_count(child_mnt, 1);
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return &child_mnt->mnt;
        } else {
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return NULL;
        }
 }
@@ -714,9 +714,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
        mnt->mnt.mnt_sb = root->d_sb;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        mnt->mnt_parent = mnt;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -745,9 +745,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                mnt->mnt.mnt_root = dget(root);
                mnt->mnt_mountpoint = mnt->mnt.mnt_root;
                mnt->mnt_parent = mnt;
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
 
                if (flag & CL_SLAVE) {
                        list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -803,35 +803,36 @@ static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
 #ifdef CONFIG_SMP
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        if (likely(atomic_read(&mnt->mnt_longterm))) {
                mnt_add_count(mnt, -1);
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return;
        }
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                return;
        }
 #else
        mnt_add_count(mnt, -1);
        if (likely(mnt_get_count(mnt)))
                return;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 #endif
        if (unlikely(mnt->mnt_pinned)) {
                mnt_add_count(mnt, mnt->mnt_pinned + 1);
                mnt->mnt_pinned = 0;
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                acct_auto_close_mnt(&mnt->mnt);
                goto put_again;
        }
+
        list_del(&mnt->mnt_instance);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        mntfree(mnt);
 }
 
@@ -857,21 +858,21 @@ EXPORT_SYMBOL(mntget);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        real_mount(mnt)->mnt_pinned++;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
        struct mount *mnt = real_mount(m);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        if (mnt->mnt_pinned) {
                mnt_add_count(mnt, 1);
                mnt->mnt_pinned--;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -988,12 +989,12 @@ int may_umount_tree(struct vfsmount *m)
        BUG_ON(!m);
 
        /* write lock needed for mnt_get_count */
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += mnt_get_count(p);
                minimum_refs += 2;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        if (actual_refs > minimum_refs)
                return 0;
@@ -1020,10 +1021,10 @@ int may_umount(struct vfsmount *mnt)
 {
        int ret = 1;
        down_read(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        if (propagate_mount_busy(real_mount(mnt), 2))
                ret = 0;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_read(&namespace_sem);
        return ret;
 }
@@ -1040,13 +1041,13 @@ void release_mounts(struct list_head *head)
                        struct dentry *dentry;
                        struct mount *m;
 
-                       br_write_lock(vfsmount_lock);
+                       br_write_lock(&vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
                        mnt->mnt_parent = mnt;
                        m->mnt_ghosts--;
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                        dput(dentry);
                        mntput(&m->mnt);
                }
@@ -1073,8 +1074,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
+               if (p->mnt_ns)
+                       __mnt_make_shortterm(p);
                p->mnt_ns = NULL;
-               __mnt_make_shortterm(p);
                list_del_init(&p->mnt_child);
                if (mnt_has_parent(p)) {
                        p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1114,12 @@ static int do_umount(struct mount *mnt, int flags)
                 * probably don't strictly need the lock here if we examined
                 * all race cases, but it's a slowpath.
                 */
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                if (mnt_get_count(mnt) != 2) {
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                        return -EBUSY;
                }
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
 
                if (!xchg(&mnt->mnt_expiry_mark, 1))
                        return -EAGAIN;
@@ -1159,7 +1161,7 @@ static int do_umount(struct mount *mnt, int flags)
        }
 
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        event++;
 
        if (!(flags & MNT_DETACH))
@@ -1171,7 +1173,7 @@ static int do_umount(struct mount *mnt, int flags)
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
@@ -1286,19 +1288,19 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        q = clone_mnt(p, p->mnt.mnt_root, flag);
                        if (!q)
                                goto Enomem;
-                       br_write_lock(vfsmount_lock);
+                       br_write_lock(&vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &path);
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                }
        }
        return res;
 Enomem:
        if (res) {
                LIST_HEAD(umount_list);
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                umount_tree(res, 0, &umount_list);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
@@ -1318,9 +1320,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        LIST_HEAD(umount_list);
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        umount_tree(real_mount(mnt), 0, &umount_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
 }
@@ -1448,7 +1450,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        if (err)
                goto out_cleanup_ids;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1469,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        return 0;
 
@@ -1565,10 +1567,10 @@ static int do_change_type(struct path *path, int flag)
                        goto out_unlock;
        }
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
  out_unlock:
        up_write(&namespace_sem);
@@ -1617,9 +1619,9 @@ static int do_loopback(struct path *path, char *old_name,
 
        err = graft_tree(mnt, path);
        if (err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
 out2:
        unlock_mount(path);
@@ -1677,16 +1679,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        else
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
        up_write(&sb->s_umount);
        if (!err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                touch_mnt_namespace(mnt->mnt_ns);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
        return err;
 }
@@ -1893,9 +1895,9 @@ fail:
        /* remove m from any expiration list it may be on */
        if (!list_empty(&mnt->mnt_expire)) {
                down_write(&namespace_sem);
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                list_del_init(&mnt->mnt_expire);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                up_write(&namespace_sem);
        }
        mntput(m);
@@ -1911,11 +1913,11 @@ fail:
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1937,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                return;
 
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
@@ -1954,7 +1956,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                touch_mnt_namespace(mnt->mnt_ns);
                umount_tree(mnt, 1, &umounts);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
 
        release_mounts(&umounts);
@@ -2218,9 +2220,9 @@ void mnt_make_shortterm(struct vfsmount *m)
        struct mount *mnt = real_mount(m);
        if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
                return;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        atomic_dec(&mnt->mnt_longterm);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 #endif
 }
 
@@ -2250,9 +2252,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                return ERR_PTR(-ENOMEM);
        }
        new_ns->root = new;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new->mnt_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2416,9 +2418,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 int path_is_under(struct path *path1, struct path *path2)
 {
        int res;
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2505,7 +2507,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* make sure we can reach put_old from new_root */
        if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
                goto out4;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        detach_mnt(new_mnt, &parent_path);
        detach_mnt(root_mnt, &root_parent);
        /* mount old root on put_old */
@@ -2513,7 +2515,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* mount new_root on / */
        attach_mnt(new_mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        chroot_fs_refs(&root, &new);
        error = 0;
 out4:
@@ -2576,7 +2578,7 @@ void __init mnt_init(void)
        for (u = 0; u < HASH_SIZE; u++)
                INIT_LIST_HEAD(&mount_hashtable[u]);
 
-       br_lock_init(vfsmount_lock);
+       br_lock_init(&vfsmount_lock);
 
        err = sysfs_init();
        if (err)
@@ -2596,9 +2598,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
        if (!atomic_dec_and_test(&ns->count))
                return;
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        umount_tree(ns->root, 0, &umount_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(ns);
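
Almost every fs/namespace.c hunk above is the same mechanical change: the big-reader lock helpers stop taking the lock by name (the old macros pasted the identifier into per-lock symbols) and take its address instead, so vfsmount_lock becomes an ordinary object that can be passed around. A tiny user-space analogue of that by-name to by-pointer API shift; the brlock type and helpers here are invented, not the kernel's lglock implementation:

    /* Invented brlock type to illustrate passing &lock instead of its name. */
    #include <pthread.h>
    #include <stdio.h>

    struct brlock {
            pthread_rwlock_t rw;
    };

    /* Old style: a macro that expands the *name* of a per-lock global. */
    #define OLD_BR_WRITE_LOCK(name) pthread_rwlock_wrlock(&(name).rw)

    /* New style: ordinary functions taking a pointer, as in the patch. */
    static void br_write_lock(struct brlock *l)   { pthread_rwlock_wrlock(&l->rw); }
    static void br_write_unlock(struct brlock *l) { pthread_rwlock_unlock(&l->rw); }

    static struct brlock vfsmount_lock = { PTHREAD_RWLOCK_INITIALIZER };

    int main(void)
    {
            br_write_lock(&vfsmount_lock);          /* callers now pass &lock */
            puts("holding vfsmount_lock for write");
            br_write_unlock(&vfsmount_lock);
            return 0;
    }
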
index 3ff5fcc1528fd21ae18a7a240ec9f2920ec30d32..122e260247f53c663550073fda567a4342b0ba63 100644 (file)
@@ -221,6 +221,10 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
 
        already_written = 0;
 
+       errno = file_update_time(file);
+       if (errno)
+               goto outrel;
+
        bouncebuffer = vmalloc(bufsize);
        if (!bouncebuffer) {
                errno = -EIO;   /* -ENOMEM */
@@ -252,8 +256,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
        }
        vfree(bouncebuffer);
 
-       file_update_time(file);
-
        *ppos = pos;
 
        if (pos > i_size_read(inode)) {
index 4af803f13516c98deaf7372af2dda0499e329fe6..54cc0cdb3dcbda111e24a3a67e7953e5173dd07e 100644 (file)
@@ -23,17 +23,17 @@ struct ncp_mount_data_kernel {
        unsigned long    flags;         /* NCP_MOUNT_* flags */
        unsigned int     int_flags;     /* internal flags */
 #define NCP_IMOUNT_LOGGEDIN_POSSIBLE   0x0001
-       __kernel_uid32_t mounted_uid;   /* Who may umount() this filesystem? */
+       uid_t            mounted_uid;   /* Who may umount() this filesystem? */
        struct pid      *wdog_pid;      /* Who cares for our watchdog packets? */
        unsigned int     ncp_fd;        /* The socket to the ncp port */
        unsigned int     time_out;      /* How long should I wait after
                                           sending a NCP request? */
        unsigned int     retry_count;   /* And how often should I retry? */
        unsigned char    mounted_vol[NCP_VOLNAME_LEN + 1];
-       __kernel_uid32_t uid;
-       __kernel_gid32_t gid;
-       __kernel_mode_t  file_mode;
-       __kernel_mode_t  dir_mode;
+       uid_t            uid;
+       gid_t            gid;
+       umode_t          file_mode;
+       umode_t          dir_mode;
        int              info_fd;
 };
 
index eb95f5091c1aff93930e17a829a808023edc2e12..23ff18fe080afd0b6470e6d72df4fa9a8b9d0dab 100644 (file)
@@ -106,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
 {
        int ret;
 
-       ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET,
+       ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
                                nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
        if (ret <= 0)
                goto out_err;
@@ -114,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
        dprintk("NFS: Callback listener port = %u (af %u)\n",
                        nfs_callback_tcpport, PF_INET);
 
-       ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6,
+       ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
                                nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
        if (ret > 0) {
                nfs_callback_tcpport6 = ret;
@@ -183,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
         * fore channel connection.
         * Returns the input port (0) and sets the svc_serv bc_xprt on success
         */
-       ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0,
+       ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
                              SVC_SOCK_ANONYMOUS);
        if (ret < 0) {
                rqstp = ERR_PTR(ret);
@@ -253,6 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
        char svc_name[12];
        int ret = 0;
        int minorversion_setup;
+       struct net *net = &init_net;
 
        mutex_lock(&nfs_callback_mutex);
        if (cb_info->users++ || cb_info->task != NULL) {
@@ -265,6 +266,12 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
                goto out_err;
        }
 
+       ret = svc_bind(serv, net);
+       if (ret < 0) {
+               printk(KERN_WARNING "NFS: bind callback service failed\n");
+               goto out_err;
+       }
+
        minorversion_setup =  nfs_minorversion_callback_svc_setup(minorversion,
                                        serv, xprt, &rqstp, &callback_svc);
        if (!minorversion_setup) {
@@ -306,6 +313,8 @@ out_err:
        dprintk("NFS: Couldn't create callback socket or server thread; "
                "err = %d\n", ret);
        cb_info->users--;
+       if (serv)
+               svc_shutdown_net(serv, net);
        goto out;
 }
 
@@ -320,6 +329,7 @@ void nfs_callback_down(int minorversion)
        cb_info->users--;
        if (cb_info->users == 0 && cb_info->task != NULL) {
                kthread_stop(cb_info->task);
+               svc_shutdown_net(cb_info->serv, &init_net);
                svc_exit_thread(cb_info->rqst);
                cb_info->serv = NULL;
                cb_info->rqst = NULL;
@@ -332,7 +342,7 @@ void nfs_callback_down(int minorversion)
 int
 check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
-       char *p = svc_gss_principal(rqstp);
+       char *p = rqstp->rq_cred.cr_principal;
 
        if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
                return 1;
index 95bfc243992c1a822041d7a205bbca23162bf91d..e64b01d2a338274a459bc9c56c60dacf282aad84 100644 (file)
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
        args->csa_nrclists = ntohl(*p++);
        args->csa_rclists = NULL;
        if (args->csa_nrclists) {
-               args->csa_rclists = kmalloc(args->csa_nrclists *
-                                           sizeof(*args->csa_rclists),
-                                           GFP_KERNEL);
+               args->csa_rclists = kmalloc_array(args->csa_nrclists,
+                                                 sizeof(*args->csa_rclists),
+                                                 GFP_KERNEL);
                if (unlikely(args->csa_rclists == NULL))
                        goto out;
 
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
                                       const struct cb_sequenceres *res)
 {
        __be32 *p;
-       unsigned status = res->csr_status;
+       __be32 status = res->csr_status;
 
        if (unlikely(status != 0))
                goto out;
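
The callback XDR hunk replaces an open-coded count * size kmalloc() with kmalloc_array(), which returns NULL instead of silently wrapping when the element count, here taken straight off the wire, is too large. A user-space sketch of the same overflow guard; alloc_array() and the rclist layout are invented for illustration:

    /* Overflow-checked array allocation, mirroring what kmalloc_array()
     * does for the csa_rclists allocation above. */
    #include <stdlib.h>
    #include <stdint.h>
    #include <stdio.h>

    static void *alloc_array(size_t n, size_t size)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;            /* n * size would overflow */
            return malloc(n * size);
    }

    int main(void)
    {
            struct rclist { uint32_t sequenceid; uint32_t slotid; } *lists;
            size_t nrclists = 4;            /* decoded from the request */

            lists = alloc_array(nrclists, sizeof(*lists));
            if (!lists)
                    return 1;
            printf("allocated %zu referring-call lists\n", nrclists);
            free(lists);
            return 0;
    }
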
index 7d108753af81e9783ab1465c8bb9986452a6cf00..f005b5bebdc73bba4d548d134699dd4f00c471ca 100644 (file)
@@ -207,7 +207,6 @@ error_0:
 static void nfs4_shutdown_session(struct nfs_client *clp)
 {
        if (nfs4_has_session(clp)) {
-               nfs4_deviceid_purge_client(clp);
                nfs4_destroy_session(clp->cl_session);
                nfs4_destroy_clientid(clp);
        }
@@ -544,8 +543,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
 
        smp_rmb();
 
-       BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
        dprintk("<-- %s found nfs_client %p for %s\n",
                __func__, clp, cl_init->hostname ?: "");
        return clp;
index 0989a2099688a377279d76f4f8c56dbc91070027..f430057ff3b397c2fe1f523bf5fcea4135276f8c 100644 (file)
@@ -1354,10 +1354,10 @@ out:
 }
 
 #ifdef CONFIG_NFS_V4
-static int nfs_open_revalidate(struct dentry *, struct nameidata *);
+static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *);
 
 const struct dentry_operations nfs4_dentry_operations = {
-       .d_revalidate   = nfs_open_revalidate,
+       .d_revalidate   = nfs4_lookup_revalidate,
        .d_delete       = nfs_dentry_delete,
        .d_iput         = nfs_dentry_iput,
        .d_automount    = nfs_d_automount,
@@ -1519,13 +1519,11 @@ no_open:
        return nfs_lookup(dir, dentry, nd);
 }
 
-static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct dentry *parent = NULL;
        struct inode *inode;
        struct inode *dir;
-       struct nfs_open_context *ctx;
-       struct iattr attr;
        int openflags, ret = 0;
 
        if (nd->flags & LOOKUP_RCU)
@@ -1554,57 +1552,13 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
        /* We cannot do exclusive creation on a positive dentry */
        if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                goto no_open_dput;
-       /* We can't create new files here */
-       openflags &= ~(O_CREAT|O_EXCL);
-
-       ctx = create_nfs_open_context(dentry, openflags);
-       ret = PTR_ERR(ctx);
-       if (IS_ERR(ctx))
-               goto out;
 
-       attr.ia_valid = ATTR_OPEN;
-       if (openflags & O_TRUNC) {
-               attr.ia_valid |= ATTR_SIZE;
-               attr.ia_size = 0;
-               nfs_wb_all(inode);
-       }
-
-       /*
-        * Note: we're not holding inode->i_mutex and so may be racing with
-        * operations that change the directory. We therefore save the
-        * change attribute *before* we do the RPC call.
-        */
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
-       if (IS_ERR(inode)) {
-               ret = PTR_ERR(inode);
-               switch (ret) {
-               case -EPERM:
-               case -EACCES:
-               case -EDQUOT:
-               case -ENOSPC:
-               case -EROFS:
-                       goto out_put_ctx;
-               default:
-                       goto out_drop;
-               }
-       }
-       iput(inode);
-       if (inode != dentry->d_inode)
-               goto out_drop;
+       /* Let f_op->open() actually open (and revalidate) the file */
+       ret = 1;
 
-       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
-       ret = nfs_intent_set_file(nd, ctx);
-       if (ret >= 0)
-               ret = 1;
 out:
        dput(parent);
        return ret;
-out_drop:
-       d_drop(dentry);
-       ret = 0;
-out_put_ctx:
-       put_nfs_open_context(ctx);
-       goto out;
 
 no_open_dput:
        dput(parent);
index 23d170bc44f4b88bea2016983bb7fa7daa2a0ae5..48253372ab1d115def0b81f56b097db6e98a0f88 100644 (file)
@@ -454,6 +454,12 @@ out:
        return result;
 }
 
+static void nfs_inode_dio_write_done(struct inode *inode)
+{
+       nfs_zap_mapping(inode, inode->i_mapping);
+       inode_dio_done(inode);
+}
+
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
@@ -478,17 +484,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
+                       nfs_list_remove_request(req);
                        nfs_list_add_request(req, &failed);
                        spin_lock(cinfo.lock);
                        dreq->flags = 0;
                        dreq->error = -EIO;
                        spin_unlock(cinfo.lock);
                }
+               nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);
 
-       while (!list_empty(&failed))
+       while (!list_empty(&failed)) {
+               req = nfs_list_entry(failed.next);
+               nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
+       }
 
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
@@ -517,9 +528,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
                nfs_list_remove_request(req);
                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
                        /* Note the rewrite will go through mds */
-                       kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo);
-               }
+               } else
+                       nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
 
@@ -564,7 +575,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
-                       nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
+                       nfs_inode_dio_write_done(dreq->inode);
                        nfs_direct_complete(dreq);
        }
 }
@@ -581,7 +592,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 {
-       nfs_zap_mapping(inode, inode->i_mapping);
+       nfs_inode_dio_write_done(inode);
        nfs_direct_complete(dreq);
 }
 #endif
@@ -710,12 +721,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                bit = NFS_IOHDR_NEED_RESCHED;
                        else if (dreq->flags == 0) {
-                               memcpy(&dreq->verf, &req->wb_verf,
+                               memcpy(&dreq->verf, hdr->verf,
                                       sizeof(dreq->verf));
                                bit = NFS_IOHDR_NEED_COMMIT;
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
-                               if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
+                               if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                        bit = NFS_IOHDR_NEED_RESCHED;
                                } else
@@ -766,14 +777,16 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               loff_t pos)
 {
        struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
+       nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
+       atomic_inc(&inode->i_dio_count);
 
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
@@ -793,6 +806,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
+               inode_dio_done(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }
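
The direct-I/O hunks above bracket every O_DIRECT write with the inode's i_dio_count: atomic_inc() before the requests are scheduled, inode_dio_done() in the new nfs_inode_dio_write_done() (or straight away when nothing could be queued), and the nfs_inode_dio_wait() calls added to setattr/getattr elsewhere in this series block until the count drains. A small sketch of that count-and-wait protocol using C11 atomics; all names here are invented, and the waiter spins instead of sleeping on a waitqueue:

    /* Sketch of the i_dio_count protocol: writers hold a count, waiters
     * block until it returns to zero. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <sched.h>

    static atomic_int dio_count;

    static void dio_begin(void) { atomic_fetch_add(&dio_count, 1); }
    static void dio_done(void)  { atomic_fetch_sub(&dio_count, 1); }

    static void dio_wait(void)
    {
            while (atomic_load(&dio_count) != 0)
                    sched_yield();          /* setattr/getattr wait here */
    }

    int main(void)
    {
            dio_begin();                    /* direct write scheduled */
            /* ... completion would normally run from another thread ... */
            dio_done();
            dio_wait();                     /* now safe to truncate or revalidate */
            puts("no direct I/O in flight");
            return 0;
    }
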
index 56311ca5f9f8183d3aa8c3aa8c9922db05ee32a3..a6708e6b438dd55f2924e5bb78c809c1575a97a9 100644 (file)
@@ -879,12 +879,81 @@ const struct file_operations nfs_file_operations = {
 static int
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
+       struct nfs_open_context *ctx;
+       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *parent = NULL;
+       struct inode *dir;
+       unsigned openflags = filp->f_flags;
+       struct iattr attr;
+       int err;
+
+       BUG_ON(inode != dentry->d_inode);
        /*
-        * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
-        * this point, then something is very wrong
+        * If no cached dentry exists or if it's negative, NFSv4 handled the
+        * opens in ->lookup() or ->create().
+        *
+        * We only get this far for a cached positive dentry.  We skipped
+        * revalidation, so handle it here by dropping the dentry and returning
+        * -EOPENSTALE.  The VFS will retry the lookup/create/open.
         */
-       dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
-       return -ENOTDIR;
+
+       dprintk("NFS: open file(%s/%s)\n",
+               dentry->d_parent->d_name.name,
+               dentry->d_name.name);
+
+       if ((openflags & O_ACCMODE) == 3)
+               openflags--;
+
+       /* We can't create new files here */
+       openflags &= ~(O_CREAT|O_EXCL);
+
+       parent = dget_parent(dentry);
+       dir = parent->d_inode;
+
+       ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+       err = PTR_ERR(ctx);
+       if (IS_ERR(ctx))
+               goto out;
+
+       attr.ia_valid = ATTR_OPEN;
+       if (openflags & O_TRUNC) {
+               attr.ia_valid |= ATTR_SIZE;
+               attr.ia_size = 0;
+               nfs_wb_all(inode);
+       }
+
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               switch (err) {
+               case -EPERM:
+               case -EACCES:
+               case -EDQUOT:
+               case -ENOSPC:
+               case -EROFS:
+                       goto out_put_ctx;
+               default:
+                       goto out_drop;
+               }
+       }
+       iput(inode);
+       if (inode != dentry->d_inode)
+               goto out_drop;
+
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+       nfs_file_set_open_context(filp, ctx);
+       err = 0;
+
+out_put_ctx:
+       put_nfs_open_context(ctx);
+out:
+       dput(parent);
+       return err;
+
+out_drop:
+       d_drop(dentry);
+       err = -EOPENSTALE;
+       goto out_put_ctx;
 }
 
 const struct file_operations nfs4_file_operations = {
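
Read together with the fs/nfs/dir.c hunks earlier (nfs4_lookup_revalidate() now simply returns 1 for a cached positive dentry), this new nfs4_file_open() moves the actual NFSv4 OPEN into f_op->open and reports the kernel-internal -EOPENSTALE when the cached dentry no longer matches, which the reworked do_last() at the top of this section converts into one fresh lookup. A compact sketch of that division of labour, using invented helpers and plain -ESTALE in place of the kernel-internal error code:

    /* Sketch of the revalidate/open split; both helpers are invented. */
    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* Revalidation no longer performs the open: trust the cache, say "valid". */
    static int lookup_revalidate(void)
    {
            return 1;
    }

    /* ->open() does the server round trip and notices a stale dentry. */
    static int file_open(bool dentry_still_matches)
    {
            if (!dentry_still_matches)
                    return -ESTALE; /* kernel uses -EOPENSTALE; VFS retries the lookup */
            return 0;
    }

    int main(void)
    {
            printf("revalidate=%d, first open=%d, retried open=%d\n",
                   lookup_revalidate(), file_open(false), file_open(true));
            return 0;
    }
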
index b5b86a05059c8c0cf157495878bad3621a25a8dc..864c51e4b400e5c7248bdd93eaed19772c0f23f2 100644 (file)
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600;
 static const struct cred *id_resolver_cache;
 static struct key_type key_type_id_resolver_legacy;
 
+struct idmap {
+       struct rpc_pipe         *idmap_pipe;
+       struct key_construction *idmap_key_cons;
+       struct mutex            idmap_mutex;
+};
 
 /**
  * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
                                            name, namelen, type, data,
                                            data_size, NULL);
        if (ret < 0) {
+               mutex_lock(&idmap->idmap_mutex);
                ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
                                            name, namelen, type, data,
                                            data_size, idmap);
+               mutex_unlock(&idmap->idmap_mutex);
        }
        return ret;
 }
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
 /* idmap classic begins here */
 module_param(nfs_idmap_cache_timeout, int, 0644);
 
-struct idmap {
-       struct rpc_pipe         *idmap_pipe;
-       struct key_construction *idmap_key_cons;
-};
-
 enum {
        Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
 };
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp)
                return error;
        }
        idmap->idmap_pipe = pipe;
+       mutex_init(&idmap->idmap_mutex);
 
        clp->cl_idmap = idmap;
        return 0;
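
The idmap hunks move struct idmap above its first user and give it an idmap_mutex, initialised in nfs_idmap_new(), so that when the keyring-based resolver misses, only one legacy pipe upcall per mount runs at a time. A sketch of that fast-path then serialized-slow-path fallback; the lookup helpers are invented:

    /* Try the fast resolver first; serialize the slow legacy upcall. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t idmap_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int fast_lookup(const char *name, unsigned int *id)
    {
            (void)name; (void)id;
            return -1;                      /* cache miss */
    }

    static int legacy_upcall(const char *name, unsigned int *id)
    {
            (void)name;
            *id = 1000;                     /* pretend the daemon answered */
            return 0;
    }

    static int idmap_get(const char *name, unsigned int *id)
    {
            int ret = fast_lookup(name, id);

            if (ret < 0) {
                    pthread_mutex_lock(&idmap_mutex);   /* one upcall at a time */
                    ret = legacy_upcall(name, id);
                    pthread_mutex_unlock(&idmap_mutex);
            }
            return ret;
    }

    int main(void)
    {
            unsigned int uid;

            if (idmap_get("alice@example.com", &uid) == 0)
                    printf("alice -> %u\n", uid);
            return 0;
    }
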
index 2f6f78c4b42d7f263419f7cf75153d5766f06c71..f7296983eba60c5ea21f164600be800ea988977f 100644 (file)
@@ -418,8 +418,10 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
                return 0;
 
        /* Write all dirty data */
-       if (S_ISREG(inode->i_mode))
+       if (S_ISREG(inode->i_mode)) {
+               nfs_inode_dio_wait(inode);
                nfs_wb_all(inode);
+       }
 
        fattr = nfs_alloc_fattr();
        if (fattr == NULL)
@@ -503,6 +505,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 
        /* Flush out writes to the server in order to update c/mtime.  */
        if (S_ISREG(inode->i_mode)) {
+               nfs_inode_dio_wait(inode);
                err = filemap_write_and_wait(inode->i_mapping);
                if (err)
                        goto out;
@@ -1527,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
        nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
-       atomic_set(&nfsi->commit_info.rpcs_out, 0);
 #endif
 }
 
@@ -1542,6 +1544,7 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&nfsi->commit_info.list);
        nfsi->npages = 0;
        nfsi->commit_info.ncommit = 0;
+       atomic_set(&nfsi->commit_info.rpcs_out, 0);
        atomic_set(&nfsi->silly_count, 1);
        INIT_HLIST_HEAD(&nfsi->silly_list);
        init_waitqueue_head(&nfsi->waitqueue);
index 1848a72755928f226807e5971355331368c3ad1d..18f99ef7134387128507e8ffd93fff6943b241d6 100644 (file)
@@ -369,6 +369,10 @@ extern int nfs_migrate_page(struct address_space *,
 /* direct.c */
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq);
+static inline void nfs_inode_dio_wait(struct inode *inode)
+{
+       inode_dio_wait(inode);
+}
 
 /* nfs4proc.c */
 extern void __nfs4_read_done_cb(struct nfs_read_data *);
index c6827f93ab57caeab4e613e97919c04a51aa5d64..cc5900ac61b584774de45f10c48906b3ff99de74 100644 (file)
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp)
 
 extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
 
-extern const u32 nfs4_fattr_bitmap[2];
+extern const u32 nfs4_fattr_bitmap[3];
 extern const u32 nfs4_statfs_bitmap[2];
 extern const u32 nfs4_pathconf_bitmap[2];
 extern const u32 nfs4_fsinfo_bitmap[3];
index d48dbefa0e71ebf6d9ac90edbb893364afd2d0a3..15fc7e4664ed53206074df177efd74e30dd13521 100644 (file)
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err)
                return -EINVAL;
        case -NFS4ERR_SHARE_DENIED:
                return -EACCES;
+       case -NFS4ERR_MINOR_VERS_MISMATCH:
+               return -EPROTONOSUPPORT;
        default:
                dprintk("%s could not handle NFSv4 error %d\n",
                                __func__, -err);
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err)
 /*
  * This is our standard bitmap for GETATTR requests.
  */
-const u32 nfs4_fattr_bitmap[2] = {
+const u32 nfs4_fattr_bitmap[3] = {
        FATTR4_WORD0_TYPE
        | FATTR4_WORD0_CHANGE
        | FATTR4_WORD0_SIZE
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = {
        | FATTR4_WORD1_TIME_MODIFY
 };
 
+static const u32 nfs4_pnfs_open_bitmap[3] = {
+       FATTR4_WORD0_TYPE
+       | FATTR4_WORD0_CHANGE
+       | FATTR4_WORD0_SIZE
+       | FATTR4_WORD0_FSID
+       | FATTR4_WORD0_FILEID,
+       FATTR4_WORD1_MODE
+       | FATTR4_WORD1_NUMLINKS
+       | FATTR4_WORD1_OWNER
+       | FATTR4_WORD1_OWNER_GROUP
+       | FATTR4_WORD1_RAWDEV
+       | FATTR4_WORD1_SPACE_USED
+       | FATTR4_WORD1_TIME_ACCESS
+       | FATTR4_WORD1_TIME_METADATA
+       | FATTR4_WORD1_TIME_MODIFY,
+       FATTR4_WORD2_MDSTHRESHOLD
+};
+
 const u32 nfs4_statfs_bitmap[2] = {
        FATTR4_WORD0_FILES_AVAIL
        | FATTR4_WORD0_FILES_FREE
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.name = &dentry->d_name;
        p->o_arg.server = server;
        p->o_arg.bitmask = server->attr_bitmask;
+       p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
        p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
        if (attrs != NULL && attrs->ia_valid != 0) {
                __be32 verf[2];
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir,
                opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
                if (!opendata->f_attr.mdsthreshold)
                        goto err_opendata_put;
+               opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
        }
        if (dentry->d_inode != NULL)
                opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
        struct nfs4_state *res;
        int status;
 
+       fmode &= FMODE_READ|FMODE_WRITE;
        do {
                status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
                                       &res, ctx_th);
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 
        nfs_fattr_init(fattr);
        
+       /* Deal with open(O_TRUNC) */
+       if (sattr->ia_valid & ATTR_OPEN)
+               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+
+       /* Optimization: if the end result is no change, don't RPC */
+       if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
+               return 0;
+
        /* Search for an existing open(O_WRITE) file */
        if (sattr->ia_valid & ATTR_FILE) {
                struct nfs_open_context *ctx;
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
                }
        }
 
-       /* Deal with open(O_TRUNC) */
-       if (sattr->ia_valid & ATTR_OPEN)
-               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
-
        status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
        if (status == 0)
                nfs_setattr_update_inode(inode, sattr);
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        if (status)
-               pr_warn("NFS: Got error %d from the server %s on "
+               dprintk("NFS: Got error %d from the server %s on "
                        "DESTROY_CLIENTID.", status, clp->cl_hostname);
        return status;
 }
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 
        if (status)
-               printk(KERN_WARNING
-                       "NFS: Got error %d from the server on DESTROY_SESSION. "
+               dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
                        "Session has been destroyed regardless...\n", status);
 
        dprintk("<-- nfs4_proc_destroy_session\n");
index c679b9ecef634c80d4738e3cc2a9624f51c327c2..f38300e9f171646aeb414c704e26bb5302f380fa 100644 (file)
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
        return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
 }
 
+static void nfs41_finish_session_reset(struct nfs_client *clp)
+{
+       clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+       clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+       /* create_session negotiated new slot table */
+       clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       nfs41_setup_state_renewal(clp);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
@@ -259,8 +269,7 @@ do_confirm:
        status = nfs4_proc_create_session(clp, cred);
        if (status != 0)
                goto out;
-       clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-       nfs41_setup_state_renewal(clp);
+       nfs41_finish_session_reset(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
        return status;
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp)
                status = nfs4_handle_reclaim_lease_error(clp, status);
                goto out;
        }
-       clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
-       /* create_session negotiated new slot table */
-       clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       nfs41_finish_session_reset(clp);
        dprintk("%s: session reset was successful for server %s!\n",
                        __func__, clp->cl_hostname);
-
-        /* Let the state manager reestablish state */
-       if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
-               nfs41_setup_state_renewal(clp);
 out:
        if (cred)
                put_rpccred(cred);
index ee4a74db95d0b1b7ea49e8dd1263f0504f2fffa8..18fae29b0301c38a09fcb30a1345375780393f5c 100644 (file)
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
 }
 
 static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+                                const u32 *open_bitmap,
                                 struct compound_hdr *hdr)
 {
        encode_getattr_three(xdr,
-                            bitmask[0] & nfs4_fattr_bitmap[0],
-                            bitmask[1] & nfs4_fattr_bitmap[1],
-                            bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
+                            bitmask[0] & open_bitmap[0],
+                            bitmask[1] & open_bitmap[1],
+                            bitmask[2] & open_bitmap[2],
                             hdr);
 }
 
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_putfh(xdr, args->fh, &hdr);
        encode_open(xdr, args, &hdr);
        encode_getfh(xdr, &hdr);
-       encode_getfattr_open(xdr, args->bitmask, &hdr);
+       encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
        encode_nops(&hdr);
 }
 
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
 
        if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
                return -EIO;
-       if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
+       if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+               /* Did the server return an unrequested attribute? */
+               if (unlikely(res == NULL))
+                       return -EREMOTEIO;
                p = xdr_inline_decode(xdr, 4);
                if (unlikely(!p))
                        goto out_overflow;
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
                                __func__);
 
                status = decode_first_threshold_item4(xdr, res);
+               bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD;
        }
        return status;
 out_overflow:
index b47277baebab92930bee6c1fbac445fd8978a6b9..f50d3e8d6f2230a42cdc656b61004dcf62182dd2 100644 (file)
@@ -454,7 +454,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
        objios->ios->done = _read_done;
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                rdata->args.offset, rdata->args.count);
-       return ore_read(objios->ios);
+       ret = ore_read(objios->ios);
+       if (unlikely(ret))
+               objio_free_result(&objios->oir);
+       return ret;
 }
 
 /*
@@ -486,8 +489,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
        struct nfs_write_data *wdata = objios->oir.rpcdata;
        struct address_space *mapping = wdata->header->inode->i_mapping;
        pgoff_t index = offset / PAGE_SIZE;
-       struct page *page = find_get_page(mapping, index);
+       struct page *page;
+       loff_t i_size = i_size_read(wdata->header->inode);
+
+       if (offset >= i_size) {
+               *uptodate = true;
+               dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
+               return ZERO_PAGE(0);
+       }
 
+       page = find_get_page(mapping, index);
        if (!page) {
                page = find_or_create_page(mapping, index, GFP_NOFS);
                if (unlikely(!page)) {
@@ -507,8 +518,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 
 static void __r4w_put_page(void *priv, struct page *page)
 {
-       dprintk("%s: index=0x%lx\n", __func__, page->index);
-       page_cache_release(page);
+       dprintk("%s: index=0x%lx\n", __func__,
+               (page == ZERO_PAGE(0)) ? -1UL : page->index);
+       if (ZERO_PAGE(0) != page)
+               page_cache_release(page);
        return;
 }
 
@@ -539,8 +552,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                wdata->args.offset, wdata->args.count);
        ret = ore_write(objios->ios);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               objio_free_result(&objios->oir);
                return ret;
+       }
 
        if (objios->sync)
                _write_done(objios->ios, objios);
index b8323aa7b54384af8f51b84b3077d98b8f22d951..bbc49caa7a82810ac497e7475997262f4da14be4 100644 (file)
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id)
 
        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
+       if (local != NULL && !try_module_get(local->owner)) {
+               dprintk("%s: Could not grab reference on module\n", __func__);
+               local = NULL;
+       }
        spin_unlock(&pnfs_spinlock);
        return local;
 }
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+               /* Decrement the MDS count. Purge the deviceid cache if zero */
+               if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
+                       nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                        goto out_no_driver;
                }
        }
-       if (!try_module_get(ld_type->owner)) {
-               dprintk("%s: Could not grab reference on module\n", __func__);
-               goto out_no_driver;
-       }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                module_put(ld_type->owner);
                goto out_no_driver;
        }
+       /* Bump the MDS count */
+       atomic_inc(&server->nfs_client->cl_mds_count);
 
        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;
index 29fd23c0efdcb07c699c5e2e94c1e23dad8de103..64f90d845f6a95cd8752e5b4e1c9b3895f1a1e66 100644 (file)
@@ -365,7 +365,7 @@ static inline bool
 pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
                   struct nfs_server *nfss)
 {
-       return (dst && src && src->bm != 0 &&
+       return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld &&
                                        nfss->pnfs_curr_ld->id == src->l_type);
 }
 
index a706b6bcc286a5a401318e868b0d1fbab2a206a4..617c7419a08ef5d0b107292bb626b84b99916b2d 100644 (file)
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
-               if (data->args.offset + data->args.count >= data->res.fattr->size)
+               if (data->args.offset + data->res.count >= data->res.fattr->size)
                        data->res.eof = 1;
        }
        return 0;
index ff656c022684e9e2b0d94587cf9d807d670bd715..06228192f64efb52e8466afd2334bc04952d50a4 100644 (file)
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
        if (data == NULL)
                goto out_no_data;
 
+       args->version = NFS_DEFAULT_VERSION;
        switch (data->version) {
        case 1:
                data->namlen = 0;
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options,
        if (data == NULL)
                goto out_no_data;
 
+       args->version = 4;
+
        switch (data->version) {
        case 1:
                if (data->host_addrlen > sizeof(args->nfs_server.address))
@@ -2857,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
 
        dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+       mount_info->fill_super = nfs4_fill_super;
+
        export_path = data->nfs_server.export_path;
        data->nfs_server.export_path = "/";
        root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
index e6fe3d69d14cbe0a5b75fc2cc5905c875f4c0181..4d6861c0dc142a5ec41e5da405012b661aeb3bc4 100644 (file)
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void)
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
+               hdr->verf = &p->verf;
        }
        return p;
 }
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
                        goto next;
                }
                if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+                       memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                        goto next;
                }
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata)
        struct nfs_write_data   *data = calldata;
        struct nfs_pgio_header *hdr = data->header;
        int status = data->task.tk_status;
-       struct nfs_page *req = hdr->req;
 
        if ((status >= 0) && nfs_write_need_commit(data)) {
                spin_lock(&hdr->lock);
                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
                        ; /* Do nothing */
                else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
-                       memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-               else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+                       memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
+               else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
                        set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
                spin_unlock(&hdr->lock);
        }
index 204438cc914ea522b83907aaf618bb0dbcfa4068..34a10d78b839f4c73b3d851e19820bc712129f36 100644 (file)
@@ -11,7 +11,7 @@ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
        struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
 
        for (f = exp->ex_flavors; f < end; f++) {
-               if (f->pseudoflavor == rqstp->rq_flavor)
+               if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
                        return f->flags;
        }
        return exp->ex_flags;
index 8e9689abbc0c7594fb6aa6cb7fb7735018165162..ba233499b9a5fc1b374bc7d79ad8f636f01135b0 100644 (file)
 #include <linux/namei.h>
 #include <linux/module.h>
 #include <linux/exportfs.h>
+#include <linux/sunrpc/svc_xprt.h>
 
 #include <net/ipv6.h>
 
 #include "nfsd.h"
 #include "nfsfh.h"
+#include "netns.h"
 
 #define NFSDDBG_FACILITY       NFSDDBG_EXPORT
 
@@ -38,7 +40,6 @@ typedef struct svc_export     svc_export;
 #define        EXPKEY_HASHBITS         8
 #define        EXPKEY_HASHMAX          (1 << EXPKEY_HASHBITS)
 #define        EXPKEY_HASHMASK         (EXPKEY_HASHMAX -1)
-static struct cache_head *expkey_table[EXPKEY_HASHMAX];
 
 static void expkey_put(struct kref *ref)
 {
@@ -71,9 +72,9 @@ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
        return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
 }
 
-static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
-static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
-static struct cache_detail svc_expkey_cache;
+static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+                                           struct svc_expkey *old);
+static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
 
 static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 {
@@ -131,7 +132,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        key.ek_fsidtype = fsidtype;
        memcpy(key.ek_fsid, buf, len);
 
-       ek = svc_expkey_lookup(&key);
+       ek = svc_expkey_lookup(cd, &key);
        err = -ENOMEM;
        if (!ek)
                goto out;
@@ -145,7 +146,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        err = 0;
        if (len == 0) {
                set_bit(CACHE_NEGATIVE, &key.h.flags);
-               ek = svc_expkey_update(&key, ek);
+               ek = svc_expkey_update(cd, &key, ek);
                if (!ek)
                        err = -ENOMEM;
        } else {
@@ -155,7 +156,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 
                dprintk("Found the path %s\n", buf);
 
-               ek = svc_expkey_update(&key, ek);
+               ek = svc_expkey_update(cd, &key, ek);
                if (!ek)
                        err = -ENOMEM;
                path_put(&key.ek_path);
@@ -163,7 +164,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        cache_flush();
  out:
        if (ek)
-               cache_put(&ek->h, &svc_expkey_cache);
+               cache_put(&ek->h, cd);
        if (dom)
                auth_domain_put(dom);
        kfree(buf);
@@ -239,10 +240,9 @@ static struct cache_head *expkey_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_expkey_cache = {
+static struct cache_detail svc_expkey_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPKEY_HASHMAX,
-       .hash_table     = expkey_table,
        .name           = "nfsd.fh",
        .cache_put      = expkey_put,
        .cache_upcall   = expkey_upcall,
@@ -268,13 +268,12 @@ svc_expkey_hash(struct svc_expkey *item)
 }
 
 static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
 {
        struct cache_head *ch;
        int hash = svc_expkey_hash(item);
 
-       ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
-                                hash);
+       ch = sunrpc_cache_lookup(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct svc_expkey, h);
        else
@@ -282,13 +281,13 @@ svc_expkey_lookup(struct svc_expkey *item)
 }
 
 static struct svc_expkey *
-svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
+svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+                 struct svc_expkey *old)
 {
        struct cache_head *ch;
        int hash = svc_expkey_hash(new);
 
-       ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
-                                &old->h, hash);
+       ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct svc_expkey, h);
        else
@@ -299,8 +298,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
 #define        EXPORT_HASHBITS         8
 #define        EXPORT_HASHMAX          (1<< EXPORT_HASHBITS)
 
-static struct cache_head *export_table[EXPORT_HASHMAX];
-
 static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
 {
        int i;
@@ -525,6 +522,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
                goto out1;
 
        exp.ex_client = dom;
+       exp.cd = cd;
 
        /* expiry */
        err = -EINVAL;
@@ -672,6 +670,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
        new->ex_fslocs.locations = NULL;
        new->ex_fslocs.locations_count = 0;
        new->ex_fslocs.migrated = 0;
+       new->cd = item->cd;
 }
 
 static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -707,10 +706,9 @@ static struct cache_head *svc_export_alloc(void)
                return NULL;
 }
 
-struct cache_detail svc_export_cache = {
+static struct cache_detail svc_export_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPORT_HASHMAX,
-       .hash_table     = export_table,
        .name           = "nfsd.export",
        .cache_put      = svc_export_put,
        .cache_upcall   = svc_export_upcall,
@@ -739,8 +737,7 @@ svc_export_lookup(struct svc_export *exp)
        struct cache_head *ch;
        int hash = svc_export_hash(exp);
 
-       ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
-                                hash);
+       ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
        if (ch)
                return container_of(ch, struct svc_export, h);
        else
@@ -753,9 +750,7 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
        struct cache_head *ch;
        int hash = svc_export_hash(old);
 
-       ch = sunrpc_cache_update(&svc_export_cache, &new->h,
-                                &old->h,
-                                hash);
+       ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct svc_export, h);
        else
@@ -764,7 +759,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
 
 
 static struct svc_expkey *
-exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
+exp_find_key(struct cache_detail *cd, svc_client *clp, int fsid_type,
+            u32 *fsidv, struct cache_req *reqp)
 {
        struct svc_expkey key, *ek;
        int err;
@@ -776,18 +772,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
        key.ek_fsidtype = fsid_type;
        memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
 
-       ek = svc_expkey_lookup(&key);
+       ek = svc_expkey_lookup(cd, &key);
        if (ek == NULL)
                return ERR_PTR(-ENOMEM);
-       err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+       err = cache_check(cd, &ek->h, reqp);
        if (err)
                return ERR_PTR(err);
        return ek;
 }
 
 
-static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
-                                    struct cache_req *reqp)
+static svc_export *exp_get_by_name(struct cache_detail *cd, svc_client *clp,
+                                  const struct path *path, struct cache_req *reqp)
 {
        struct svc_export *exp, key;
        int err;
@@ -797,11 +793,12 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
 
        key.ex_client = clp;
        key.ex_path = *path;
+       key.cd = cd;
 
        exp = svc_export_lookup(&key);
        if (exp == NULL)
                return ERR_PTR(-ENOMEM);
-       err = cache_check(&svc_export_cache, &exp->h, reqp);
+       err = cache_check(cd, &exp->h, reqp);
        if (err)
                return ERR_PTR(err);
        return exp;
@@ -810,16 +807,17 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
 /*
  * Find the export entry for a given dentry.
  */
-static struct svc_export *exp_parent(svc_client *clp, struct path *path)
+static struct svc_export *exp_parent(struct cache_detail *cd, svc_client *clp,
+                                    struct path *path)
 {
        struct dentry *saved = dget(path->dentry);
-       svc_export *exp = exp_get_by_name(clp, path, NULL);
+       svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
 
        while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
                struct dentry *parent = dget_parent(path->dentry);
                dput(path->dentry);
                path->dentry = parent;
-               exp = exp_get_by_name(clp, path, NULL);
+               exp = exp_get_by_name(cd, clp, path, NULL);
        }
        dput(path->dentry);
        path->dentry = saved;
@@ -834,13 +832,16 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
  * since it's harder to fool a kernel module than a user space program.
  */
 int
-exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
+exp_rootfh(struct net *net, svc_client *clp, char *name,
+          struct knfsd_fh *f, int maxsize)
 {
        struct svc_export       *exp;
        struct path             path;
        struct inode            *inode;
        struct svc_fh           fh;
        int                     err;
+       struct nfsd_net         *nn = net_generic(net, nfsd_net_id);
+       struct cache_detail     *cd = nn->svc_export_cache;
 
        err = -EPERM;
        /* NB: we probably ought to check that it's NUL-terminated */
@@ -853,7 +854,7 @@ exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
        dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
                 name, path.dentry, clp->name,
                 inode->i_sb->s_id, inode->i_ino);
-       exp = exp_parent(clp, &path);
+       exp = exp_parent(cd, clp, &path);
        if (IS_ERR(exp)) {
                err = PTR_ERR(exp);
                goto out;
@@ -875,16 +876,18 @@ out:
        return err;
 }
 
-static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
+static struct svc_export *exp_find(struct cache_detail *cd,
+                                  struct auth_domain *clp, int fsid_type,
                                   u32 *fsidv, struct cache_req *reqp)
 {
        struct svc_export *exp;
-       struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
+       struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
+       struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
        if (IS_ERR(ek))
                return ERR_CAST(ek);
 
-       exp = exp_get_by_name(clp, &ek->ek_path, reqp);
-       cache_put(&ek->h, &svc_expkey_cache);
+       exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
+       cache_put(&ek->h, nn->svc_expkey_cache);
 
        if (IS_ERR(exp))
                return ERR_CAST(exp);
@@ -901,13 +904,13 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
                return 0;
        /* ip-address based client; check sec= export option: */
        for (f = exp->ex_flavors; f < end; f++) {
-               if (f->pseudoflavor == rqstp->rq_flavor)
+               if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
                        return 0;
        }
        /* defaults in absence of sec= options: */
        if (exp->ex_nflavors == 0) {
-               if (rqstp->rq_flavor == RPC_AUTH_NULL ||
-                   rqstp->rq_flavor == RPC_AUTH_UNIX)
+               if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
+                   rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
                        return 0;
        }
        return nfserr_wrongsec;
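
check_nfsd_access() above now reads the flavor from rq_cred.cr_flavor, but the decision itself is unchanged: accept the request if its flavor appears in the export's sec= list, and with no sec= list fall back to allowing only AUTH_NULL and AUTH_UNIX. A standalone sketch of that decision; the enum and function below are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Stand-ins for the RPC auth flavor numbers (values follow the RPC convention) */
enum { AUTH_NULL = 0, AUTH_UNIX = 1, AUTH_GSS_KRB5 = 390003 };

/* Minimal sketch of the sec= check: accept the request's flavor if the export
 * lists it; with no sec= list, allow only the historical AUTH_NULL/AUTH_UNIX
 * defaults.  Returning 0 corresponds to nfserr_wrongsec. */
static int export_allows_flavor(const int *flavors, int nflavors, int req_flavor)
{
        int i;

        for (i = 0; i < nflavors; i++)
                if (flavors[i] == req_flavor)
                        return 1;
        if (nflavors == 0)
                return req_flavor == AUTH_NULL || req_flavor == AUTH_UNIX;
        return 0;
}

int main(void)
{
        int sec[] = { AUTH_GSS_KRB5 };

        printf("%d\n", export_allows_flavor(sec, 1, AUTH_UNIX));     /* 0 */
        printf("%d\n", export_allows_flavor(sec, 1, AUTH_GSS_KRB5)); /* 1 */
        printf("%d\n", export_allows_flavor(NULL, 0, AUTH_UNIX));    /* 1 */
        return 0;
}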
@@ -926,12 +929,14 @@ struct svc_export *
 rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
 {
        struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+       struct cache_detail *cd = nn->svc_export_cache;
 
        if (rqstp->rq_client == NULL)
                goto gss;
 
        /* First try the auth_unix client: */
-       exp = exp_get_by_name(rqstp->rq_client, path, &rqstp->rq_chandle);
+       exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
        if (PTR_ERR(exp) == -ENOENT)
                goto gss;
        if (IS_ERR(exp))
@@ -943,7 +948,7 @@ gss:
        /* Otherwise, try falling back on gss client */
        if (rqstp->rq_gssclient == NULL)
                return exp;
-       gssexp = exp_get_by_name(rqstp->rq_gssclient, path, &rqstp->rq_chandle);
+       gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
        if (PTR_ERR(gssexp) == -ENOENT)
                return exp;
        if (!IS_ERR(exp))
@@ -955,12 +960,15 @@ struct svc_export *
 rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
 {
        struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+       struct cache_detail *cd = nn->svc_export_cache;
 
        if (rqstp->rq_client == NULL)
                goto gss;
 
        /* First try the auth_unix client: */
-       exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+       exp = exp_find(cd, rqstp->rq_client, fsid_type,
+                      fsidv, &rqstp->rq_chandle);
        if (PTR_ERR(exp) == -ENOENT)
                goto gss;
        if (IS_ERR(exp))
@@ -972,7 +980,7 @@ gss:
        /* Otherwise, try falling back on gss client */
        if (rqstp->rq_gssclient == NULL)
                return exp;
-       gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+       gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
                                                &rqstp->rq_chandle);
        if (PTR_ERR(gssexp) == -ENOENT)
                return exp;
@@ -1029,13 +1037,15 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
 /* Iterator */
 
 static void *e_start(struct seq_file *m, loff_t *pos)
-       __acquires(svc_export_cache.hash_lock)
+       __acquires(((struct cache_detail *)m->private)->hash_lock)
 {
        loff_t n = *pos;
        unsigned hash, export;
        struct cache_head *ch;
-       
-       read_lock(&svc_export_cache.hash_lock);
+       struct cache_detail *cd = m->private;
+       struct cache_head **export_table = cd->hash_table;
+
+       read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
@@ -1060,6 +1070,8 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
 {
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
+       struct cache_detail *cd = m->private;
+       struct cache_head **export_table = cd->hash_table;
 
        if (p == SEQ_START_TOKEN)
                hash = 0;
@@ -1082,9 +1094,11 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
 }
 
 static void e_stop(struct seq_file *m, void *p)
-       __releases(svc_export_cache.hash_lock)
+       __releases(((struct cache_detail *)m->private)->hash_lock)
 {
-       read_unlock(&svc_export_cache.hash_lock);
+       struct cache_detail *cd = m->private;
+
+       read_unlock(&cd->hash_lock);
 }
 
 static struct flags {
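
e_start()/e_next() above now take the cache_detail from m->private instead of the old global, but the iterator position encoding is unchanged: the 64-bit seq_file offset packs the hash bucket into the high 32 bits and the entry index within that bucket into the low 32 bits (hence the hash = n >> 32 above). A tiny sketch of that packing; the helper names are invented for illustration.

#include <stdio.h>

/* Hypothetical pack/unpack of a two-level iterator position into one
 * 64-bit seq_file offset: bucket in the high half, entry in the low half. */
static unsigned long long pack_pos(unsigned int hash, unsigned int entry)
{
        return ((unsigned long long)hash << 32) | entry;
}

static void unpack_pos(unsigned long long pos, unsigned int *hash,
                       unsigned int *entry)
{
        *hash = pos >> 32;
        *entry = pos & 0xffffffffu;
}

int main(void)
{
        unsigned int hash, entry;

        unpack_pos(pack_pos(3, 7), &hash, &entry);
        printf("hash=%u entry=%u\n", hash, entry); /* hash=3 entry=7 */
        return 0;
}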
@@ -1195,6 +1209,7 @@ static int e_show(struct seq_file *m, void *p)
 {
        struct cache_head *cp = p;
        struct svc_export *exp = container_of(cp, struct svc_export, h);
+       struct cache_detail *cd = m->private;
 
        if (p == SEQ_START_TOKEN) {
                seq_puts(m, "# Version 1.1\n");
@@ -1203,10 +1218,10 @@ static int e_show(struct seq_file *m, void *p)
        }
 
        cache_get(&exp->h);
-       if (cache_check(&svc_export_cache, &exp->h, NULL))
+       if (cache_check(cd, &exp->h, NULL))
                return 0;
-       cache_put(&exp->h, &svc_export_cache);
-       return svc_export_show(m, &svc_export_cache, cp);
+       exp_put(exp);
+       return svc_export_show(m, cd, cp);
 }
 
 const struct seq_operations nfs_exports_op = {
@@ -1216,48 +1231,70 @@ const struct seq_operations nfs_exports_op = {
        .show   = e_show,
 };
 
-
 /*
  * Initialize the exports module.
  */
 int
-nfsd_export_init(void)
+nfsd_export_init(struct net *net)
 {
        int rv;
-       dprintk("nfsd: initializing export module.\n");
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       dprintk("nfsd: initializing export module (net: %p).\n", net);
 
-       rv = cache_register_net(&svc_export_cache, &init_net);
+       nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
+       if (IS_ERR(nn->svc_export_cache))
+               return PTR_ERR(nn->svc_export_cache);
+       rv = cache_register_net(nn->svc_export_cache, net);
        if (rv)
-               return rv;
-       rv = cache_register_net(&svc_expkey_cache, &init_net);
+               goto destroy_export_cache;
+
+       nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
+       if (IS_ERR(nn->svc_expkey_cache)) {
+               rv = PTR_ERR(nn->svc_expkey_cache);
+               goto unregister_export_cache;
+       }
+       rv = cache_register_net(nn->svc_expkey_cache, net);
        if (rv)
-               cache_unregister_net(&svc_export_cache, &init_net);
-       return rv;
+               goto destroy_expkey_cache;
+       return 0;
 
+destroy_expkey_cache:
+       cache_destroy_net(nn->svc_expkey_cache, net);
+unregister_export_cache:
+       cache_unregister_net(nn->svc_export_cache, net);
+destroy_export_cache:
+       cache_destroy_net(nn->svc_export_cache, net);
+       return rv;
 }
 
 /*
  * Flush exports table - called when last nfsd thread is killed
  */
 void
-nfsd_export_flush(void)
+nfsd_export_flush(struct net *net)
 {
-       cache_purge(&svc_expkey_cache);
-       cache_purge(&svc_export_cache);
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       cache_purge(nn->svc_expkey_cache);
+       cache_purge(nn->svc_export_cache);
 }
 
 /*
  * Shutdown the exports module.
  */
 void
-nfsd_export_shutdown(void)
+nfsd_export_shutdown(struct net *net)
 {
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: shutting down export module.\n");
+       dprintk("nfsd: shutting down export module (net: %p).\n", net);
 
-       cache_unregister_net(&svc_expkey_cache, &init_net);
-       cache_unregister_net(&svc_export_cache, &init_net);
-       svcauth_unix_purge();
+       cache_unregister_net(nn->svc_expkey_cache, net);
+       cache_unregister_net(nn->svc_export_cache, net);
+       cache_destroy_net(nn->svc_expkey_cache, net);
+       cache_destroy_net(nn->svc_export_cache, net);
+       svcauth_unix_purge(net);
 
-       dprintk("nfsd: export shutdown complete.\n");
+       dprintk("nfsd: export shutdown complete (net: %p).\n", net);
 }
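
nfsd_export_init() above switches from two static caches to per-network-namespace instances created from templates, and unwinds on failure with cascading labels so each error path undoes exactly the steps that already succeeded, in reverse order. A userspace sketch of the same create/register/rollback shape; every name below (cache_create, cache_register, exports_init, ...) is a hypothetical stand-in, not the sunrpc API.

#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; };

/* Hypothetical stand-ins for cache_create_net()/cache_register_net() */
static struct cache *cache_create(const char *name)
{
        struct cache *c = malloc(sizeof(*c));

        if (c)
                c->name = name;
        return c;
}

static int cache_register(struct cache *c) { (void)c; return 0; }
static void cache_unregister(struct cache *c) { (void)c; }
static void cache_destroy(struct cache *c) { free(c); }

static int exports_init(struct cache **exportp, struct cache **expkeyp)
{
        struct cache *export, *expkey;
        int rv;

        export = cache_create("nfsd.export");
        if (!export)
                return -1;
        rv = cache_register(export);
        if (rv)
                goto destroy_export;

        expkey = cache_create("nfsd.fh");
        if (!expkey) {
                rv = -1;
                goto unregister_export;
        }
        rv = cache_register(expkey);
        if (rv)
                goto destroy_expkey;

        *exportp = export;
        *expkeyp = expkey;
        return 0;

destroy_expkey:
        cache_destroy(expkey);
unregister_export:
        cache_unregister(export);
destroy_export:
        cache_destroy(export);
        return rv;
}

int main(void)
{
        struct cache *export, *expkey;

        if (exports_init(&export, &expkey) == 0)
                printf("both caches created and registered\n");
        return 0;
}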
index 9559ce468732e7c00ae40cd4fc3a379a525ecef7..e6c38159622fe6bc337f3d24ada7db683838ceac 100644 (file)
@@ -58,6 +58,7 @@ static int nfsd_inject_set(void *op_ptr, u64 val)
 
 static int nfsd_inject_get(void *data, u64 *val)
 {
+       *val = 0;
        return 0;
 }
 
index 2f3be1321534375b65cac5705806edda29c6088d..9d513efc01baad65a0807284082315c3d5a68206 100644 (file)
 #define IDMAP_NAMESZ 128
 
 #ifdef CONFIG_NFSD_V4
-int nfsd_idmap_init(void);
-void nfsd_idmap_shutdown(void);
+int nfsd_idmap_init(struct net *);
+void nfsd_idmap_shutdown(struct net *);
 #else
-static inline int nfsd_idmap_init(void)
+static inline int nfsd_idmap_init(struct net *net)
 {
        return 0;
 }
-static inline void nfsd_idmap_shutdown(void)
+static inline void nfsd_idmap_shutdown(struct net *net)
 {
 }
 #endif
index 12e0cff435b43c06689cab12763a5890942cd37d..39365636b244fbfc7aaac3a794f83f87d7ea69a6 100644 (file)
@@ -28,6 +28,12 @@ struct cld_net;
 
 struct nfsd_net {
        struct cld_net *cld_net;
+
+       struct cache_detail *svc_expkey_cache;
+       struct cache_detail *svc_export_cache;
+
+       struct cache_detail *idtoname_cache;
+       struct cache_detail *nametoid_cache;
 };
 
 extern int nfsd_net_id;
index c8e9f637153ab3e44ba293f7097e7b32e77d4e54..a5fd6b982f277ce648bbd528947964ea2ef63c73 100644 (file)
@@ -650,9 +650,10 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
        struct rpc_clnt *client;
 
        if (clp->cl_minorversion == 0) {
-               if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+               if (!clp->cl_cred.cr_principal &&
+                               (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
                        return -EINVAL;
-               args.client_name = clp->cl_principal;
+               args.client_name = clp->cl_cred.cr_principal;
                args.prognumber = conn->cb_prog,
                args.protocol = XPRT_TRANSPORT_TCP;
                args.authflavor = clp->cl_flavor;
index 322d11ce06a452858ed0e547cf1e70e7883c08ee..dae36f1dee95e68defce943bedf01efc46d61a54 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/svc_xprt.h>
 #include <net/net_namespace.h>
 #include "idmap.h"
 #include "nfsd.h"
+#include "netns.h"
 
 /*
  * Turn off idmapping when using AUTH_SYS.
@@ -107,8 +109,6 @@ ent_alloc(void)
  * ID -> Name cache
  */
 
-static struct cache_head *idtoname_table[ENT_HASHMAX];
-
 static uint32_t
 idtoname_hash(struct ent *ent)
 {
@@ -183,13 +183,13 @@ warn_no_idmapd(struct cache_detail *detail, int has_died)
 
 
 static int         idtoname_parse(struct cache_detail *, char *, int);
-static struct ent *idtoname_lookup(struct ent *);
-static struct ent *idtoname_update(struct ent *, struct ent *);
+static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
+static struct ent *idtoname_update(struct cache_detail *, struct ent *,
+                                  struct ent *);
 
-static struct cache_detail idtoname_cache = {
+static struct cache_detail idtoname_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
-       .hash_table     = idtoname_table,
        .name           = "nfs4.idtoname",
        .cache_put      = ent_put,
        .cache_upcall   = idtoname_upcall,
@@ -244,7 +244,7 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
                goto out;
 
        error = -ENOMEM;
-       res = idtoname_lookup(&ent);
+       res = idtoname_lookup(cd, &ent);
        if (!res)
                goto out;
 
@@ -260,11 +260,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
        else
                memcpy(ent.name, buf1, sizeof(ent.name));
        error = -ENOMEM;
-       res = idtoname_update(&ent, res);
+       res = idtoname_update(cd, &ent, res);
        if (res == NULL)
                goto out;
 
-       cache_put(&res->h, &idtoname_cache);
+       cache_put(&res->h, cd);
 
        error = 0;
 out:
@@ -275,10 +275,9 @@ out:
 
 
 static struct ent *
-idtoname_lookup(struct ent *item)
+idtoname_lookup(struct cache_detail *cd, struct ent *item)
 {
-       struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache,
-                                                   &item->h,
+       struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
                                                    idtoname_hash(item));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -287,10 +286,9 @@ idtoname_lookup(struct ent *item)
 }
 
 static struct ent *
-idtoname_update(struct ent *new, struct ent *old)
+idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-       struct cache_head *ch = sunrpc_cache_update(&idtoname_cache,
-                                                   &new->h, &old->h,
+       struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
                                                    idtoname_hash(new));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -303,8 +301,6 @@ idtoname_update(struct ent *new, struct ent *old)
  * Name -> ID cache
  */
 
-static struct cache_head *nametoid_table[ENT_HASHMAX];
-
 static inline int
 nametoid_hash(struct ent *ent)
 {
@@ -359,14 +355,14 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
        return 0;
 }
 
-static struct ent *nametoid_lookup(struct ent *);
-static struct ent *nametoid_update(struct ent *, struct ent *);
+static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
+static struct ent *nametoid_update(struct cache_detail *, struct ent *,
+                                  struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache = {
+static struct cache_detail nametoid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
-       .hash_table     = nametoid_table,
        .name           = "nfs4.nametoid",
        .cache_put      = ent_put,
        .cache_upcall   = nametoid_upcall,
@@ -426,14 +422,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
                set_bit(CACHE_NEGATIVE, &ent.h.flags);
 
        error = -ENOMEM;
-       res = nametoid_lookup(&ent);
+       res = nametoid_lookup(cd, &ent);
        if (res == NULL)
                goto out;
-       res = nametoid_update(&ent, res);
+       res = nametoid_update(cd, &ent, res);
        if (res == NULL)
                goto out;
 
-       cache_put(&res->h, &nametoid_cache);
+       cache_put(&res->h, cd);
        error = 0;
 out:
        kfree(buf1);
@@ -443,10 +439,9 @@ out:
 
 
 static struct ent *
-nametoid_lookup(struct ent *item)
+nametoid_lookup(struct cache_detail *cd, struct ent *item)
 {
-       struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache,
-                                                   &item->h,
+       struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
                                                    nametoid_hash(item));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -455,10 +450,9 @@ nametoid_lookup(struct ent *item)
 }
 
 static struct ent *
-nametoid_update(struct ent *new, struct ent *old)
+nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-       struct cache_head *ch = sunrpc_cache_update(&nametoid_cache,
-                                                   &new->h, &old->h,
+       struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
                                                    nametoid_hash(new));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -471,34 +465,55 @@ nametoid_update(struct ent *new, struct ent *old)
  */
 
 int
-nfsd_idmap_init(void)
+nfsd_idmap_init(struct net *net)
 {
        int rv;
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       rv = cache_register_net(&idtoname_cache, &init_net);
+       nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
+       if (IS_ERR(nn->idtoname_cache))
+               return PTR_ERR(nn->idtoname_cache);
+       rv = cache_register_net(nn->idtoname_cache, net);
        if (rv)
-               return rv;
-       rv = cache_register_net(&nametoid_cache, &init_net);
+               goto destroy_idtoname_cache;
+       nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
+       if (IS_ERR(nn->nametoid_cache)) {
+               rv = PTR_ERR(nn->nametoid_cache);
+               goto unregister_idtoname_cache;
+       }
+       rv = cache_register_net(nn->nametoid_cache, net);
        if (rv)
-               cache_unregister_net(&idtoname_cache, &init_net);
+               goto destroy_nametoid_cache;
+       return 0;
+
+destroy_nametoid_cache:
+       cache_destroy_net(nn->nametoid_cache, net);
+unregister_idtoname_cache:
+       cache_unregister_net(nn->idtoname_cache, net);
+destroy_idtoname_cache:
+       cache_destroy_net(nn->idtoname_cache, net);
        return rv;
 }
 
 void
-nfsd_idmap_shutdown(void)
+nfsd_idmap_shutdown(struct net *net)
 {
-       cache_unregister_net(&idtoname_cache, &init_net);
-       cache_unregister_net(&nametoid_cache, &init_net);
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       cache_unregister_net(nn->idtoname_cache, net);
+       cache_unregister_net(nn->nametoid_cache, net);
+       cache_destroy_net(nn->idtoname_cache, net);
+       cache_destroy_net(nn->nametoid_cache, net);
 }
 
 static int
 idmap_lookup(struct svc_rqst *rqstp,
-               struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-               struct cache_detail *detail, struct ent **item)
+               struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
+               struct ent *key, struct cache_detail *detail, struct ent **item)
 {
        int ret;
 
-       *item = lookup_fn(key);
+       *item = lookup_fn(detail, key);
        if (!*item)
                return -ENOMEM;
  retry:
@@ -506,7 +521,7 @@ idmap_lookup(struct svc_rqst *rqstp,
 
        if (ret == -ETIMEDOUT) {
                struct ent *prev_item = *item;
-               *item = lookup_fn(key);
+               *item = lookup_fn(detail, key);
                if (*item != prev_item)
                        goto retry;
                cache_put(&(*item)->h, detail);
@@ -531,19 +546,20 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
                .type = type,
        };
        int ret;
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
        if (namelen + 1 > sizeof(key.name))
                return nfserr_badowner;
        memcpy(key.name, name, namelen);
        key.name[namelen] = '\0';
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-       ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
+       ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
        if (ret == -ENOENT)
                return nfserr_badowner;
        if (ret)
                return nfserrno(ret);
        *id = item->id;
-       cache_put(&item->h, &nametoid_cache);
+       cache_put(&item->h, nn->nametoid_cache);
        return 0;
 }
 
@@ -555,9 +571,10 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
                .type = type,
        };
        int ret;
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-       ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
+       ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
        if (ret == -ENOENT)
                return sprintf(name, "%u", id);
        if (ret)
@@ -565,7 +582,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
        ret = strlen(item->name);
        BUG_ON(ret > IDMAP_NAMESZ);
        memcpy(name, item->name, ret);
-       cache_put(&item->h, &idtoname_cache);
+       cache_put(&item->h, nn->idtoname_cache);
        return ret;
 }
 
@@ -588,7 +605,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
 static __be32
 do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
 {
-       if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+       if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
                if (numeric_name_to_id(rqstp, type, name, namelen, id))
                        return 0;
                /*
@@ -601,7 +618,7 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
 static int
 do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
 {
-       if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+       if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
                return sprintf(name, "%u", id);
        return idmap_id_to_name(rqstp, type, id, name);
 }
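
do_name_to_id()/do_id_to_name() above keep the AUTH_SYS escape hatch: with idmapping disabled and a flavor below RPC_AUTH_GSS, owner strings are treated as plain decimal ids rather than going through the idmapd upcall. A hedged userspace sketch of that numeric fallback; the signatures below are simplified inventions, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sketch: when idmapping is off for a non-kerberized request,
 * an owner string is accepted only if it is purely numeric. */
static int parse_numeric_owner(const char *name, unsigned int *id)
{
        char *end;
        unsigned long val;

        if (*name == '\0')
                return 0;
        val = strtoul(name, &end, 10);
        if (*end != '\0')
                return 0;       /* not purely numeric: would fall back to the upcall */
        *id = (unsigned int)val;
        return 1;
}

int main(void)
{
        unsigned int id = 0;
        char buf[16];
        int ok;

        ok = parse_numeric_owner("1000", &id);
        printf("\"1000\" -> ok=%d id=%u\n", ok, id);
        ok = parse_numeric_owner("alice@example", &id);
        printf("\"alice@example\" -> ok=%d\n", ok);
        /* the reverse direction simply prints the id, as do_id_to_name() does */
        snprintf(buf, sizeof(buf), "%u", 65534);
        printf("65534 -> \"%s\"\n", buf);
        return 0;
}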
index ed3f9206a0ee87c914f133492f1f6011775bdef8..5ff0b7b9fc08f22f39cc1f2d83062baceb773bdc 100644 (file)
@@ -570,7 +570,7 @@ static ssize_t
 cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 {
        struct cld_upcall *tmp, *cup;
-       struct cld_msg *cmsg = (struct cld_msg *)src;
+       struct cld_msg __user *cmsg = (struct cld_msg __user *)src;
        uint32_t xid;
        struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
                                                nfsd_net_id);
@@ -1029,7 +1029,7 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
        return ret;
 }
 
-struct notifier_block nfsd4_cld_block = {
+static struct notifier_block nfsd4_cld_block = {
        .notifier_call = rpc_pipefs_event,
 };
 
index 7f71c69cdcdfdcbd7245a71820b9f13e1a0bb135..94effd5bc4a107086ef53515ac2bb9dc39950b88 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/clnt.h>
 #include "xdr4.h"
 #include "vfs.h"
+#include "current_stateid.h"
 
 #define NFSDDBG_FACILITY                NFSDDBG_PROC
 
@@ -447,37 +448,69 @@ static struct list_head close_lru;
  *
  * which we should reject.
  */
-static void
-set_access(unsigned int *access, unsigned long bmap) {
+static unsigned int
+bmap_to_share_mode(unsigned long bmap) {
        int i;
+       unsigned int access = 0;
 
-       *access = 0;
        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
-                       *access |= i;
-       }
-}
-
-static void
-set_deny(unsigned int *deny, unsigned long bmap) {
-       int i;
-
-       *deny = 0;
-       for (i = 0; i < 4; i++) {
-               if (test_bit(i, &bmap))
-                       *deny |= i ;
+                       access |= i;
        }
+       return access;
 }
 
-static int
+static bool
 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
        unsigned int access, deny;
 
-       set_access(&access, stp->st_access_bmap);
-       set_deny(&deny, stp->st_deny_bmap);
+       access = bmap_to_share_mode(stp->st_access_bmap);
+       deny = bmap_to_share_mode(stp->st_deny_bmap);
        if ((access & open->op_share_deny) || (deny & open->op_share_access))
-               return 0;
-       return 1;
+               return false;
+       return true;
+}
+
+/* set share access for a given stateid */
+static inline void
+set_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __set_bit(access, &stp->st_access_bmap);
+}
+
+/* clear share access for a given stateid */
+static inline void
+clear_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __clear_bit(access, &stp->st_access_bmap);
+}
+
+/* test whether a given stateid has access */
+static inline bool
+test_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       return test_bit(access, &stp->st_access_bmap);
+}
+
+/* set share deny for a given stateid */
+static inline void
+set_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __set_bit(access, &stp->st_deny_bmap);
+}
+
+/* clear share deny for a given stateid */
+static inline void
+clear_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __clear_bit(access, &stp->st_deny_bmap);
+}
+
+/* test whether a given stateid is denying specific access */
+static inline bool
+test_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       return test_bit(access, &stp->st_deny_bmap);
 }
 
 static int nfs4_access_to_omode(u32 access)
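
The helpers introduced above replace open-coded bit twiddling on st_access_bmap/st_deny_bmap: share access and share deny values 1..3 are tracked as bits in a per-stateid bitmap, and bmap_to_share_mode() folds a bitmap back into a share-mode mask for the conflict test in test_share(). A userspace miniature of the same pattern; the struct and helpers below are illustrative only, with plain shifts standing in for __set_bit()/__clear_bit()/test_bit().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of a stateid's share bitmaps */
struct stateid {
        unsigned long access_bmap;
        unsigned long deny_bmap;
};

static void set_access(unsigned int access, struct stateid *stp)
{
        stp->access_bmap |= 1UL << access;
}

static void clear_access(unsigned int access, struct stateid *stp)
{
        stp->access_bmap &= ~(1UL << access);
}

static bool test_access(unsigned int access, struct stateid *stp)
{
        return stp->access_bmap & (1UL << access);
}

/* Fold a bitmap back into a share-mode mask, as bmap_to_share_mode() does */
static unsigned int bmap_to_share_mode(unsigned long bmap)
{
        unsigned int i, mode = 0;

        for (i = 1; i < 4; i++)
                if (bmap & (1UL << i))
                        mode |= i;
        return mode;
}

int main(void)
{
        struct stateid stp = { 0, 0 };

        set_access(1, &stp);    /* OPEN4_SHARE_ACCESS_READ */
        set_access(2, &stp);    /* OPEN4_SHARE_ACCESS_WRITE */
        printf("mode=%u\n", bmap_to_share_mode(stp.access_bmap)); /* 3 = BOTH */
        clear_access(1, &stp);
        printf("read still set? %d\n", test_access(1, &stp));     /* 0 */
        return 0;
}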
@@ -493,6 +526,20 @@ static int nfs4_access_to_omode(u32 access)
        BUG();
 }
 
+/* release all access and file references for a given stateid */
+static void
+release_all_access(struct nfs4_ol_stateid *stp)
+{
+       int i;
+
+       for (i = 1; i < 4; i++) {
+               if (test_access(i, stp))
+                       nfs4_file_put_access(stp->st_file,
+                                            nfs4_access_to_omode(i));
+               clear_access(i, stp);
+       }
+}
+
 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 {
        list_del(&stp->st_perfile);
@@ -501,16 +548,7 @@ static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 
 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 {
-       int i;
-
-       if (stp->st_access_bmap) {
-               for (i = 1; i < 4; i++) {
-                       if (test_bit(i, &stp->st_access_bmap))
-                               nfs4_file_put_access(stp->st_file,
-                                               nfs4_access_to_omode(i));
-                       __clear_bit(i, &stp->st_access_bmap);
-               }
-       }
+       release_all_access(stp);
        put_nfs4_file(stp->st_file);
        stp->st_file = NULL;
 }
@@ -862,7 +900,7 @@ static void free_session(struct kref *kref)
        struct nfsd4_session *ses;
        int mem;
 
-       BUG_ON(!spin_is_locked(&client_lock));
+       lockdep_assert_held(&client_lock);
        ses = container_of(kref, struct nfsd4_session, se_ref);
        nfsd4_del_conns(ses);
        spin_lock(&nfsd_drc_lock);
@@ -885,7 +923,7 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
        struct nfsd4_session *new;
        struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
        int numslots, slotsize;
-       int status;
+       __be32 status;
        int idx;
 
        /*
@@ -984,7 +1022,8 @@ static inline void
 renew_client_locked(struct nfs4_client *clp)
 {
        if (is_client_expired(clp)) {
-               dprintk("%s: client (clientid %08x/%08x) already expired\n",
+               WARN_ON(1);
+               printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
@@ -1041,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
 static inline void
 free_client(struct nfs4_client *clp)
 {
-       BUG_ON(!spin_is_locked(&client_lock));
+       lockdep_assert_held(&client_lock);
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1049,9 +1088,7 @@ free_client(struct nfs4_client *clp)
                list_del(&ses->se_perclnt);
                nfsd4_put_session_locked(ses);
        }
-       if (clp->cl_cred.cr_group_info)
-               put_group_info(clp->cl_cred.cr_group_info);
-       kfree(clp->cl_principal);
+       free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        kfree(clp);
 }
@@ -1132,12 +1169,21 @@ static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
        target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
 }
 
-static void copy_cred(struct svc_cred *target, struct svc_cred *source)
+static int copy_cred(struct svc_cred *target, struct svc_cred *source)
 {
+       if (source->cr_principal) {
+               target->cr_principal =
+                               kstrdup(source->cr_principal, GFP_KERNEL);
+               if (target->cr_principal == NULL)
+                       return -ENOMEM;
+       } else
+               target->cr_principal = NULL;
+       target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
+       return 0;
 }
 
 static int same_name(const char *n1, const char *n2)
@@ -1157,11 +1203,31 @@ same_clid(clientid_t *cl1, clientid_t *cl2)
        return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
 }
 
-/* XXX what about NGROUP */
+static bool groups_equal(struct group_info *g1, struct group_info *g2)
+{
+       int i;
+
+       if (g1->ngroups != g2->ngroups)
+               return false;
+       for (i = 0; i < g1->ngroups; i++)
+               if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
+                       return false;
+       return true;
+}
+
 static int
 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
 {
-       return cr1->cr_uid == cr2->cr_uid;
+       if ((cr1->cr_flavor != cr2->cr_flavor)
+               || (cr1->cr_uid != cr2->cr_uid)
+               || (cr1->cr_gid != cr2->cr_gid)
+               || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+               return false;
+       if (cr1->cr_principal == cr2->cr_principal)
+               return true;
+       if (!cr1->cr_principal || !cr2->cr_principal)
+               return false;
+       return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
 }
 
 static void gen_clid(struct nfs4_client *clp)
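
same_creds() above grows from a bare uid comparison into a full match on flavor, uid, gid, the supplementary group list, and the GSS principal (two NULL principals compare equal, a NULL against a non-NULL does not). A small userspace sketch of that comparison using a flattened, hypothetical credential struct:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical flattened credential, for illustration only */
struct cred {
        unsigned int flavor;
        unsigned int uid, gid;
        int ngroups;
        const unsigned int *groups;
        const char *principal;  /* NULL unless the cred came from GSS */
};

static bool groups_equal(const struct cred *a, const struct cred *b)
{
        int i;

        if (a->ngroups != b->ngroups)
                return false;
        for (i = 0; i < a->ngroups; i++)
                if (a->groups[i] != b->groups[i])
                        return false;
        return true;
}

static bool creds_match(const struct cred *a, const struct cred *b)
{
        if (a->flavor != b->flavor || a->uid != b->uid || a->gid != b->gid ||
            !groups_equal(a, b))
                return false;
        if (a->principal == b->principal)       /* both NULL, or same pointer */
                return true;
        if (!a->principal || !b->principal)
                return false;
        return strcmp(a->principal, b->principal) == 0;
}

int main(void)
{
        unsigned int g[] = { 100, 101 };
        struct cred c1 = { 6, 1000, 1000, 2, g, "alice@EXAMPLE" };
        struct cred c2 = { 6, 1000, 1000, 2, g, "alice@EXAMPLE" };
        struct cred c3 = { 1, 1000, 1000, 2, g, NULL };

        printf("%d %d\n", creds_match(&c1, &c2), creds_match(&c1, &c3)); /* 1 0 */
        return 0;
}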
@@ -1204,25 +1270,20 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
 {
        struct nfs4_client *clp;
        struct sockaddr *sa = svc_addr(rqstp);
-       char *princ;
+       int ret;
 
        clp = alloc_client(name);
        if (clp == NULL)
                return NULL;
 
        INIT_LIST_HEAD(&clp->cl_sessions);
-
-       princ = svc_gss_principal(rqstp);
-       if (princ) {
-               clp->cl_principal = kstrdup(princ, GFP_KERNEL);
-               if (clp->cl_principal == NULL) {
-                       spin_lock(&client_lock);
-                       free_client(clp);
-                       spin_unlock(&client_lock);
-                       return NULL;
-               }
+       ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+       if (ret) {
+               spin_lock(&client_lock);
+               free_client(clp);
+               spin_unlock(&client_lock);
+               return NULL;
        }
-
        idr_init(&clp->cl_stateids);
        memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
        atomic_set(&clp->cl_refcount, 0);
@@ -1240,8 +1301,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
-       clp->cl_flavor = rqstp->rq_flavor;
-       copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        gen_confirm(clp);
        clp->cl_cb_session = NULL;
        return clp;
@@ -1470,18 +1529,32 @@ nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
        clid->flags = new->cl_exchange_flags;
 }
 
+static bool client_has_state(struct nfs4_client *clp)
+{
+       /*
+        * Note clp->cl_openowners check isn't quite right: there's no
+        * need to count owners without stateid's.
+        *
+        * need to count owners without stateids.
+        */
+       return !list_empty(&clp->cl_openowners)
+               || !list_empty(&clp->cl_delegations)
+               || !list_empty(&clp->cl_sessions);
+}
+
 __be32
 nfsd4_exchange_id(struct svc_rqst *rqstp,
                  struct nfsd4_compound_state *cstate,
                  struct nfsd4_exchange_id *exid)
 {
        struct nfs4_client *unconf, *conf, *new;
-       int status;
+       __be32 status;
        unsigned int            strhashval;
        char                    dname[HEXDIR_LEN];
        char                    addr_str[INET6_ADDRSTRLEN];
        nfs4_verifier           verf = exid->verifier;
        struct sockaddr         *sa = svc_addr(rqstp);
+       bool    update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
 
        rpc_ntop(sa, addr_str, sizeof(addr_str));
        dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
@@ -1507,71 +1580,63 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
        status = nfs4_make_rec_clidname(dname, &exid->clname);
 
        if (status)
-               goto error;
+               return status;
 
        strhashval = clientstr_hashval(dname);
 
+       /* Cases below refer to rfc 5661 section 18.35.4: */
        nfs4_lock_state();
-       status = nfs_ok;
-
        conf = find_confirmed_client_by_str(dname, strhashval);
        if (conf) {
-               if (!clp_used_exchangeid(conf)) {
-                       status = nfserr_clid_inuse; /* XXX: ? */
-                       goto out;
-               }
-               if (!same_verf(&verf, &conf->cl_verifier)) {
-                       /* 18.35.4 case 8 */
-                       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+               bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
+               bool verfs_match = same_verf(&verf, &conf->cl_verifier);
+
+               if (update) {
+                       if (!clp_used_exchangeid(conf)) { /* buggy client */
+                               status = nfserr_inval;
+                               goto out;
+                       }
+                       if (!creds_match) { /* case 9 */
+                               status = nfserr_perm;
+                               goto out;
+                       }
+                       if (!verfs_match) { /* case 8 */
                                status = nfserr_not_same;
                                goto out;
                        }
-                       /* Client reboot: destroy old state */
-                       expire_client(conf);
-                       goto out_new;
+                       /* case 6 */
+                       exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
+                       new = conf;
+                       goto out_copy;
                }
-               if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
-                       /* 18.35.4 case 9 */
-                       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
-                               status = nfserr_perm;
+               if (!creds_match) { /* case 3 */
+                       if (client_has_state(conf)) {
+                               status = nfserr_clid_inuse;
                                goto out;
                        }
                        expire_client(conf);
                        goto out_new;
                }
-               /*
-                * Set bit when the owner id and verifier map to an already
-                * confirmed client id (18.35.3).
-                */
-               exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
-
-               /*
-                * Falling into 18.35.4 case 2, possible router replay.
-                * Leave confirmed record intact and return same result.
-                */
-               copy_verf(conf, &verf);
-               new = conf;
-               goto out_copy;
+               if (verfs_match) { /* case 2 */
+                       conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+                       new = conf;
+                       goto out_copy;
+               }
+               /* case 5, client reboot */
+               goto out_new;
        }
 
-       /* 18.35.4 case 7 */
-       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+       if (update) { /* case 7 */
                status = nfserr_noent;
                goto out;
        }
 
        unconf  = find_unconfirmed_client_by_str(dname, strhashval);
-       if (unconf) {
-               /*
-                * Possible retry or client restart.  Per 18.35.4 case 4,
-                * a new unconfirmed record should be generated regardless
-                * of whether any properties have changed.
-                */
+       if (unconf) /* case 4, possible retry or client restart */
                expire_client(unconf);
-       }
 
+       /* case 1 (normal case) */
 out_new:
-       /* Normal case */
        new = create_client(exid->clname, dname, rqstp, &verf);
        if (new == NULL) {
                status = nfserr_jukebox;
@@ -1584,7 +1649,7 @@ out_copy:
        exid->clientid.cl_boot = new->cl_clientid.cl_boot;
        exid->clientid.cl_id = new->cl_clientid.cl_id;
 
-       exid->seqid = 1;
+       exid->seqid = new->cl_cs_slot.sl_seqid + 1;
        nfsd4_set_ex_flags(new, exid);
 
        dprintk("nfsd4_exchange_id seqid %d flags %x\n",
@@ -1593,12 +1658,10 @@ out_copy:
 
 out:
        nfs4_unlock_state();
-error:
-       dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
        return status;
 }
 
-static int
+static __be32
 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
 {
        dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
@@ -1626,7 +1689,7 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
  */
 static void
 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
-                          struct nfsd4_clid_slot *slot, int nfserr)
+                          struct nfsd4_clid_slot *slot, __be32 nfserr)
 {
        slot->sl_status = nfserr;
        memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
@@ -1657,7 +1720,7 @@ nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
                                /* seqid, slotID, slotID, slotID, status */ \
                        5 ) * sizeof(__be32))
 
-static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
+static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
 {
        return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
                || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
@@ -1673,7 +1736,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
        struct nfsd4_session *new;
        struct nfsd4_clid_slot *cs_slot = NULL;
        bool confirm_me = false;
-       int status = 0;
+       __be32 status = 0;
 
        if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
                return nfserr_inval;
@@ -1686,16 +1749,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                cs_slot = &conf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status == nfserr_replay_cache) {
-                       dprintk("Got a create_session replay! seqid= %d\n",
-                               cs_slot->sl_seqid);
-                       /* Return the cached reply status */
                        status = nfsd4_replay_create_session(cr_ses, cs_slot);
                        goto out;
                } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
                        status = nfserr_seq_misordered;
-                       dprintk("Sequence misordered!\n");
-                       dprintk("Expected seqid= %d but got seqid= %d\n",
-                               cs_slot->sl_seqid, cr_ses->seqid);
                        goto out;
                }
        } else if (unconf) {
@@ -1704,7 +1761,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                        status = nfserr_clid_inuse;
                        goto out;
                }
-
                cs_slot = &unconf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status) {
@@ -1712,7 +1768,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                        status = nfserr_seq_misordered;
                        goto out;
                }
-
                confirm_me = true;
                conf = unconf;
        } else {
@@ -1749,8 +1804,14 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 
        /* cache solo and embedded create sessions under the state lock */
        nfsd4_cache_create_session(cr_ses, cs_slot, status);
-       if (confirm_me)
+       if (confirm_me) {
+               unsigned int hash = clientstr_hashval(unconf->cl_recdir);
+               struct nfs4_client *old =
+                       find_confirmed_client_by_str(conf->cl_recdir, hash);
+               if (old)
+                       expire_client(old);
                move_to_confirmed(conf);
+       }
 out:
        nfs4_unlock_state();
        dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1818,7 +1879,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
                      struct nfsd4_destroy_session *sessionid)
 {
        struct nfsd4_session *ses;
-       u32 status = nfserr_badsession;
+       __be32 status = nfserr_badsession;
 
        /* Notes:
         * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid
@@ -1914,7 +1975,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
        struct nfsd4_session *session;
        struct nfsd4_slot *slot;
        struct nfsd4_conn *conn;
-       int status;
+       __be32 status;
 
        if (resp->opcnt != 1)
                return nfserr_sequence_pos;
@@ -2008,18 +2069,11 @@ out:
        return status;
 }
 
-static inline bool has_resources(struct nfs4_client *clp)
-{
-       return !list_empty(&clp->cl_openowners)
-               || !list_empty(&clp->cl_delegations)
-               || !list_empty(&clp->cl_sessions);
-}
-
 __be32
 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
 {
        struct nfs4_client *conf, *unconf, *clp;
-       int status = 0;
+       __be32 status = 0;
 
        nfs4_lock_state();
        unconf = find_unconfirmed_client(&dc->clientid);
@@ -2028,7 +2082,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
        if (conf) {
                clp = conf;
 
-               if (!is_client_expired(conf) && has_resources(conf)) {
+               if (!is_client_expired(conf) && client_has_state(conf)) {
                        status = nfserr_clientid_busy;
                        goto out;
                }
@@ -2055,7 +2109,7 @@ out:
 __be32
 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
 {
-       int status = 0;
+       __be32 status = 0;
 
        if (rc->rca_one_fs) {
                if (!cstate->current_fh.fh_dentry)
@@ -2106,17 +2160,13 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                return status;
 
-       /* 
-        * XXX The Duplicate Request Cache (DRC) has been checked (??)
-        * We get here on a DRC miss.
-        */
-
        strhashval = clientstr_hashval(dname);
 
+       /* Cases below refer to rfc 3530 section 14.2.33: */
        nfs4_lock_state();
        conf = find_confirmed_client_by_str(dname, strhashval);
        if (conf) {
-               /* RFC 3530 14.2.33 CASE 0: */
+               /* case 0: */
                status = nfserr_clid_inuse;
                if (clp_used_exchangeid(conf))
                        goto out;
@@ -2129,63 +2179,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
                }
        }
-       /*
-        * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
-        * has a description of SETCLIENTID request processing consisting
-        * of 5 bullet points, labeled as CASE0 - CASE4 below.
-        */
        unconf = find_unconfirmed_client_by_str(dname, strhashval);
+       if (unconf)
+               expire_client(unconf);
        status = nfserr_jukebox;
-       if (!conf) {
-               /*
-                * RFC 3530 14.2.33 CASE 4:
-                * placed first, because it is the normal case
-                */
-               if (unconf)
-                       expire_client(unconf);
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
-               gen_clid(new);
-       } else if (same_verf(&conf->cl_verifier, &clverifier)) {
-               /*
-                * RFC 3530 14.2.33 CASE 1:
-                * probable callback update
-                */
-               if (unconf) {
-                       /* Note this is removing unconfirmed {*x***},
-                        * which is stronger than RFC recommended {vxc**}.
-                        * This has the advantage that there is at most
-                        * one {*x***} in either list at any time.
-                        */
-                       expire_client(unconf);
-               }
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
+       new = create_client(clname, dname, rqstp, &clverifier);
+       if (new == NULL)
+               goto out;
+       if (conf && same_verf(&conf->cl_verifier, &clverifier))
+               /* case 1: probable callback update */
                copy_clid(new, conf);
-       } else if (!unconf) {
-               /*
-                * RFC 3530 14.2.33 CASE 2:
-                * probable client reboot; state will be removed if
-                * confirmed.
-                */
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
-               gen_clid(new);
-       } else {
-               /*
-                * RFC 3530 14.2.33 CASE 3:
-                * probable client reboot; state will be removed if
-                * confirmed.
-                */
-               expire_client(unconf);
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
+       else /* case 4 (new client) or cases 2, 3 (client reboot): */
                gen_clid(new);
-       }
        /*
         * XXX: we should probably set this at creation time, and check
         * for consistent minorversion use throughout:
@@ -2203,17 +2208,11 @@ out:
 }
 
 
-/*
- * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
- * a description of SETCLIENTID_CONFIRM request processing consisting of 4
- * bullets, labeled as CASE1 - CASE4 below.
- */
 __be32
 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
                         struct nfsd4_compound_state *cstate,
                         struct nfsd4_setclientid_confirm *setclientid_confirm)
 {
-       struct sockaddr *sa = svc_addr(rqstp);
        struct nfs4_client *conf, *unconf;
        nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
        clientid_t * clid = &setclientid_confirm->sc_clientid;
@@ -2221,84 +2220,44 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 
        if (STALE_CLIENTID(clid))
                return nfserr_stale_clientid;
-       /* 
-        * XXX The Duplicate Request Cache (DRC) has been checked (??)
-        * We get here on a DRC miss.
-        */
-
        nfs4_lock_state();
 
        conf = find_confirmed_client(clid);
        unconf = find_unconfirmed_client(clid);
-
-       status = nfserr_clid_inuse;
-       if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
-               goto out;
-       if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
-               goto out;
-
        /*
-        * section 14.2.34 of RFC 3530 has a description of
-        * SETCLIENTID_CONFIRM request processing consisting
-        * of 4 bullet points, labeled as CASE1 - CASE4 below.
+        * We try hard to give out unique clientid's, so if we get an
+        * attempt to confirm the same clientid with a different cred,
+        * there's a bug somewhere.  Let's charitably assume it's our
+        * bug.
         */
-       if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
-               /*
-                * RFC 3530 14.2.34 CASE 1:
-                * callback update
-                */
-               if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
-                       status = nfserr_clid_inuse;
-               else {
-                       nfsd4_change_callback(conf, &unconf->cl_cb_conn);
-                       nfsd4_probe_callback(conf);
-                       expire_client(unconf);
+       status = nfserr_serverfault;
+       if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
+               goto out;
+       if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
+               goto out;
+       /* cases below refer to rfc 3530 section 14.2.34: */
+       if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
+               if (conf && !unconf) /* case 2: probable retransmit */
                        status = nfs_ok;
+               else /* case 4: client hasn't noticed we rebooted yet? */
+                       status = nfserr_stale_clientid;
+               goto out;
+       }
+       status = nfs_ok;
+       if (conf) { /* case 1: callback update */
+               nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+               nfsd4_probe_callback(conf);
+               expire_client(unconf);
+       } else { /* case 3: normal case; new or rebooted client */
+               unsigned int hash = clientstr_hashval(unconf->cl_recdir);
 
+               conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+               if (conf) {
+                       nfsd4_client_record_remove(conf);
+                       expire_client(conf);
                }
-       } else if (conf && !unconf) {
-               /*
-                * RFC 3530 14.2.34 CASE 2:
-                * probable retransmitted request; play it safe and
-                * do nothing.
-                */
-               if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
-                       status = nfserr_clid_inuse;
-               else
-                       status = nfs_ok;
-       } else if (!conf && unconf
-                       && same_verf(&unconf->cl_confirm, &confirm)) {
-               /*
-                * RFC 3530 14.2.34 CASE 3:
-                * Normal case; new or rebooted client:
-                */
-               if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
-                       status = nfserr_clid_inuse;
-               } else {
-                       unsigned int hash =
-                               clientstr_hashval(unconf->cl_recdir);
-                       conf = find_confirmed_client_by_str(unconf->cl_recdir,
-                                                           hash);
-                       if (conf) {
-                               nfsd4_client_record_remove(conf);
-                               expire_client(conf);
-                       }
-                       move_to_confirmed(unconf);
-                       conf = unconf;
-                       nfsd4_probe_callback(conf);
-                       status = nfs_ok;
-               }
-       } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
-           && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
-                                                               &confirm)))) {
-               /*
-                * RFC 3530 14.2.34 CASE 4:
-                * Client probably hasn't noticed that we rebooted yet.
-                */
-               status = nfserr_stale_clientid;
-       } else {
-               /* check that we have hit one of the cases...*/
-               status = nfserr_clid_inuse;
+               move_to_confirmed(unconf);
+               nfsd4_probe_callback(unconf);
        }
 out:
        nfs4_unlock_state();
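
Editor's note: the rewritten SETCLIENTID_CONFIRM path above collapses the four RFC 3530 section 14.2.34 cases into two up-front credential checks plus a confirmed/unconfirmed branch. A minimal userspace sketch of that decision order follows; it is not part of the commit, and the status names are illustrative rather than the kernel's nfserr_* codes.

#include <stdbool.h>
#include <stdio.h>

enum status { OK, SERVERFAULT, STALE_CLIENTID };  /* illustrative only */

/*
 * Mirror of the new control flow: credentials are checked first for both
 * table entries, then the unconfirmed entry's verifier decides between
 * "retransmit / stale" and "callback update / normal confirm".
 */
static enum status confirm(bool conf, bool conf_cred_ok,
                           bool unconf, bool unconf_cred_ok,
                           bool verf_matches_unconf)
{
        if (unconf && !unconf_cred_ok)
                return SERVERFAULT;     /* same clientid, different cred: our bug */
        if (conf && !conf_cred_ok)
                return SERVERFAULT;
        if (!unconf || !verf_matches_unconf)
                return (conf && !unconf) ? OK              /* case 2: probable retransmit */
                                         : STALE_CLIENTID; /* case 4: client missed our reboot */
        /* case 1 (conf set): callback update; case 3: move unconf to confirmed */
        return OK;
}

int main(void)
{
        printf("retransmit -> %d\n", confirm(true, true, false, true, false));
        printf("new client -> %d\n", confirm(false, true, true, true, true));
        return 0;
}
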
@@ -2454,8 +2413,8 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        stp->st_file = fp;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
-       __set_bit(open->op_share_access, &stp->st_access_bmap);
-       __set_bit(open->op_share_deny, &stp->st_deny_bmap);
+       set_access(open->op_share_access, stp);
+       set_deny(open->op_share_deny, stp);
        stp->st_openstp = NULL;
 }
 
@@ -2534,8 +2493,8 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
        ret = nfserr_locked;
        /* Search for conflicting share reservations */
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
-               if (test_bit(deny_type, &stp->st_deny_bmap) ||
-                   test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
+               if (test_deny(deny_type, stp) ||
+                   test_deny(NFS4_SHARE_DENY_BOTH, stp))
                        goto out;
        }
        ret = nfs_ok;
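
Editor's note: the set_access()/set_deny() and test_*/clear_* helpers used in the hunks above are introduced elsewhere in this patch and are not shown here; presumably they wrap the same bit operations on st_access_bmap/st_deny_bmap behind a typed interface so callers cannot mix the two bitmaps. A rough self-contained sketch of such wrappers, under that assumption, with illustrative struct and constant definitions:

#include <assert.h>
#include <stdbool.h>

#define NFS4_SHARE_ACCESS_READ  1
#define NFS4_SHARE_ACCESS_WRITE 2
#define NFS4_SHARE_ACCESS_BOTH  3

struct ol_stateid {
        unsigned long st_access_bmap;
        unsigned long st_deny_bmap;
};

/* Typed wrappers hide the raw bitmaps from the open/lock code. */
static void set_access(unsigned int access, struct ol_stateid *stp)
{ stp->st_access_bmap |= 1UL << access; }

static bool test_access(unsigned int access, struct ol_stateid *stp)
{ return stp->st_access_bmap & (1UL << access); }

static void clear_access(unsigned int access, struct ol_stateid *stp)
{ stp->st_access_bmap &= ~(1UL << access); }

static void set_deny(unsigned int deny, struct ol_stateid *stp)
{ stp->st_deny_bmap |= 1UL << deny; }

static bool test_deny(unsigned int deny, struct ol_stateid *stp)
{ return stp->st_deny_bmap & (1UL << deny); }

int main(void)
{
        struct ol_stateid stp = { 0, 0 };

        set_access(NFS4_SHARE_ACCESS_READ, &stp);
        assert(test_access(NFS4_SHARE_ACCESS_READ, &stp));
        assert(!test_deny(NFS4_SHARE_ACCESS_READ, &stp));
        clear_access(NFS4_SHARE_ACCESS_READ, &stp);
        assert(!test_access(NFS4_SHARE_ACCESS_READ, &stp));
        return 0;
}
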
@@ -2791,7 +2750,7 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
        bool new_access;
        __be32 status;
 
-       new_access = !test_bit(op_share_access, &stp->st_access_bmap);
+       new_access = !test_access(op_share_access, stp);
        if (new_access) {
                status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
                if (status)
@@ -2806,8 +2765,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
                return status;
        }
        /* remember the open */
-       __set_bit(op_share_access, &stp->st_access_bmap);
-       __set_bit(open->op_share_deny, &stp->st_deny_bmap);
+       set_access(op_share_access, stp);
+       set_deny(open->op_share_deny, stp);
 
        return nfs_ok;
 }
@@ -3155,10 +3114,17 @@ out:
 static struct lock_manager nfsd4_manager = {
 };
 
+static bool grace_ended;
+
 static void
 nfsd4_end_grace(void)
 {
+       /* do nothing if grace period already ended */
+       if (grace_ended)
+               return;
+
        dprintk("NFSD: end of grace period\n");
+       grace_ended = true;
        nfsd4_record_grace_done(&init_net, boot_time);
        locks_end_grace(&nfsd4_manager);
        /*
@@ -3183,8 +3149,7 @@ nfs4_laundromat(void)
        nfs4_lock_state();
 
        dprintk("NFSD: laundromat service - starting\n");
-       if (locks_in_grace())
-               nfsd4_end_grace();
+       nfsd4_end_grace();
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&client_lock);
        list_for_each_safe(pos, next, &client_lru) {
@@ -3276,18 +3241,18 @@ STALE_STATEID(stateid_t *stateid)
 }
 
 static inline int
-access_permit_read(unsigned long access_bmap)
+access_permit_read(struct nfs4_ol_stateid *stp)
 {
-       return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
+       return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
+               test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
+               test_access(NFS4_SHARE_ACCESS_WRITE, stp);
 }
 
 static inline int
-access_permit_write(unsigned long access_bmap)
+access_permit_write(struct nfs4_ol_stateid *stp)
 {
-       return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
+       return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
+               test_access(NFS4_SHARE_ACCESS_BOTH, stp);
 }
 
 static
@@ -3298,9 +3263,9 @@ __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
        /* For lock stateid's, we test the parent open, not the lock: */
        if (stp->st_openstp)
                stp = stp->st_openstp;
-       if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
+       if ((flags & WR_STATE) && !access_permit_write(stp))
                 goto out;
-       if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
+       if ((flags & RD_STATE) && !access_permit_read(stp))
                 goto out;
        status = nfs_ok;
 out:
@@ -3340,7 +3305,7 @@ static bool stateid_generation_after(stateid_t *a, stateid_t *b)
        return (s32)a->si_generation - (s32)b->si_generation > 0;
 }
 
-static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
+static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
        /*
         * When sessions are used the stateid generation number is ignored
@@ -3649,10 +3614,10 @@ out:
 
 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
 {
-       if (!test_bit(access, &stp->st_access_bmap))
+       if (!test_access(access, stp))
                return;
        nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
-       __clear_bit(access, &stp->st_access_bmap);
+       clear_access(access, stp);
 }
 
 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
@@ -3674,12 +3639,12 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
 }
 
 static void
-reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
+reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
 {
        int i;
        for (i = 0; i < 4; i++) {
                if ((i & deny) != i)
-                       __clear_bit(i, bmap);
+                       clear_deny(i, stp);
        }
 }
 
@@ -3706,19 +3671,19 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        if (status)
                goto out; 
        status = nfserr_inval;
-       if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
-               dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
+       if (!test_access(od->od_share_access, stp)) {
+               dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
                        stp->st_access_bmap, od->od_share_access);
                goto out;
        }
-       if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
+       if (!test_deny(od->od_share_deny, stp)) {
                dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
                        stp->st_deny_bmap, od->od_share_deny);
                goto out;
        }
        nfs4_stateid_downgrade(stp, od->od_share_access);
 
-       reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
+       reset_union_bmap_deny(od->od_share_deny, stp);
 
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4008,13 +3973,13 @@ static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
        struct nfs4_file *fp = lock_stp->st_file;
        int oflag = nfs4_access_to_omode(access);
 
-       if (test_bit(access, &lock_stp->st_access_bmap))
+       if (test_access(access, lock_stp))
                return;
        nfs4_file_get_access(fp, oflag);
-       __set_bit(access, &lock_stp->st_access_bmap);
+       set_access(access, lock_stp);
 }
 
-__be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
+static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
 {
        struct nfs4_file *fi = ost->st_file;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
@@ -4055,7 +4020,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_openowner *open_sop = NULL;
        struct nfs4_lockowner *lock_sop = NULL;
        struct nfs4_ol_stateid *lock_stp;
-       struct nfs4_file *fp;
        struct file *filp = NULL;
        struct file_lock file_lock;
        struct file_lock conflock;
@@ -4123,7 +4087,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
        }
        lock_sop = lockowner(lock_stp->st_stateowner);
-       fp = lock_stp->st_file;
 
        lkflg = setlkflg(lock->lk_type);
        status = nfs4_check_openmode(lock_stp, lkflg);
@@ -4715,6 +4678,7 @@ nfs4_state_start(void)
        nfsd4_client_tracking_init(&init_net);
        boot_time = get_seconds();
        locks_start_grace(&nfsd4_manager);
+       grace_ended = false;
        printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
               nfsd4_grace);
        ret = set_callback_cred();
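
Editor's note: the grace_ended flag introduced above makes nfsd4_end_grace() safe to call unconditionally from the laundromat, while nfs4_state_start() re-arms it on every server start. A tiny standalone sketch of that idempotent "run once per start" pattern (illustrative only):

#include <stdio.h>
#include <stdbool.h>

static bool grace_ended;

static void end_grace(void)
{
        if (grace_ended)        /* do nothing if grace period already ended */
                return;
        grace_ended = true;
        puts("end of grace period");
}

static void state_start(void)
{
        grace_ended = false;    /* re-arm on every server start */
}

int main(void)
{
        state_start();
        end_grace();    /* runs the teardown once */
        end_grace();    /* no-op on the second and later calls */
        return 0;
}
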
index 74c00bc92b9af6b01e95e55c119b90d61fbf9d34..4949667c84ea0c3d687a46faf0a455c410c39b6f 100644 (file)
@@ -1674,12 +1674,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 
 static void write32(__be32 **p, u32 n)
 {
-       *(*p)++ = n;
+       *(*p)++ = htonl(n);
 }
 
 static void write64(__be32 **p, u64 n)
 {
-       write32(p, (u32)(n >> 32));
+       write32(p, (n >> 32));
        write32(p, (u32)n);
 }
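
Editor's note: the write32() fix above matters because the destination buffer holds __be32 (big-endian XDR) words; storing a host-order u32 directly is only correct on big-endian machines. A small userspace illustration of the same conversion, with htonl() standing in for the kernel's cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htonl() */

/* Store n in network (big-endian) byte order, as XDR requires. */
static void write32(uint32_t **p, uint32_t n)
{
        *(*p)++ = htonl(n);
}

static void write64(uint32_t **p, uint64_t n)
{
        write32(p, (uint32_t)(n >> 32));
        write32(p, (uint32_t)n);
}

int main(void)
{
        uint32_t buf[2], *p = buf;
        const unsigned char *b = (const unsigned char *)buf;

        write64(&p, 0x0102030405060708ULL);
        /* Bytes come out 01 02 ... 08 regardless of host endianness. */
        for (int i = 0; i < 8; i++)
                printf("%02x ", b[i]);
        printf("\n");
        return 0;
}
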
 
@@ -1744,15 +1744,16 @@ static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, _
 }
 
 /* Encode as an array of strings the string given with components
- * separated @sep.
+ * separated @sep, escaped with esc_enter and esc_exit.
  */
-static __be32 nfsd4_encode_components(char sep, char *components,
-                                  __be32 **pp, int *buflen)
+static __be32 nfsd4_encode_components_esc(char sep, char *components,
+                                  __be32 **pp, int *buflen,
+                                  char esc_enter, char esc_exit)
 {
        __be32 *p = *pp;
        __be32 *countp = p;
        int strlen, count=0;
-       char *str, *end;
+       char *str, *end, *next;
 
        dprintk("nfsd4_encode_components(%s)\n", components);
        if ((*buflen -= 4) < 0)
@@ -1760,8 +1761,23 @@ static __be32 nfsd4_encode_components(char sep, char *components,
        WRITE32(0); /* We will fill this in with @count later */
        end = str = components;
        while (*end) {
-               for (; *end && (*end != sep); end++)
-                       ; /* Point to end of component */
+               bool found_esc = false;
+
+               /* try to parse as esc_start, ..., esc_end, sep */
+               if (*str == esc_enter) {
+                       for (; *end && (*end != esc_exit); end++)
+                               /* find esc_exit or end of string */;
+                       next = end + 1;
+                       if (*end && (!*next || *next == sep)) {
+                               str++;
+                               found_esc = true;
+                       }
+               }
+
+               if (!found_esc)
+                       for (; *end && (*end != sep); end++)
+                               /* find sep or end of string */;
+
                strlen = end - str;
                if (strlen) {
                        if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
@@ -1780,6 +1796,15 @@ static __be32 nfsd4_encode_components(char sep, char *components,
        return 0;
 }
 
+/* Encode as an array of strings the string given with components
+ * separated @sep.
+ */
+static __be32 nfsd4_encode_components(char sep, char *components,
+                                  __be32 **pp, int *buflen)
+{
+       return nfsd4_encode_components_esc(sep, components, pp, buflen, 0, 0);
+}
+
 /*
  * encode a location element of a fs_locations structure
  */
@@ -1789,7 +1814,8 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
        __be32 status;
        __be32 *p = *pp;
 
-       status = nfsd4_encode_components(':', location->hosts, &p, buflen);
+       status = nfsd4_encode_components_esc(':', location->hosts, &p, buflen,
+                                               '[', ']');
        if (status)
                return status;
        status = nfsd4_encode_components('/', location->path, &p, buflen);
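
Editor's note: the escaping added above lets an fs_locations host list such as "[::1]:server2" keep the colons inside a bracketed IPv6 literal while the list is still split on ':'. A standalone sketch of the same split rule, strings only and without the XDR encoding; the function name is illustrative:

#include <stdio.h>
#include <stdbool.h>

/*
 * Split 'components' on 'sep', except that a component starting with
 * esc_enter runs to the matching esc_exit (mirroring the kernel's
 * esc_enter/esc_exit handling, minus the buffer bookkeeping).
 */
static void split_components(const char *components, char sep,
                             char esc_enter, char esc_exit)
{
        const char *str = components, *end = components;

        while (*end) {
                bool found_esc = false;

                if (*str == esc_enter) {
                        while (*end && *end != esc_exit)
                                end++;
                        const char *next = end + 1;
                        if (*end && (!*next || *next == sep)) {
                                str++;          /* drop the opening bracket */
                                found_esc = true;
                        }
                }
                if (!found_esc)
                        while (*end && *end != sep)
                                end++;

                if (end > str)
                        printf("component: %.*s\n", (int)(end - str), str);
                if (found_esc && *end)
                        end++;                  /* skip esc_exit */
                if (*end)
                        end++;                  /* skip sep */
                str = end;
        }
}

int main(void)
{
        split_components("[::1]:server2:10.0.0.1", ':', '[', ']');
        return 0;
}
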
@@ -3251,7 +3277,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
 }
 
 static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
                         struct nfsd4_exchange_id *exid)
 {
        __be32 *p;
@@ -3306,7 +3332,7 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
 }
 
 static __be32
-nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
                            struct nfsd4_create_session *sess)
 {
        __be32 *p;
@@ -3355,14 +3381,14 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
 }
 
 static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
                             struct nfsd4_destroy_session *destroy_session)
 {
        return nfserr;
 }
 
 static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
                          struct nfsd4_free_stateid *free_stateid)
 {
        __be32 *p;
@@ -3371,13 +3397,13 @@ nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
                return nfserr;
 
        RESERVE_SPACE(4);
-       WRITE32(nfserr);
+       *p++ = nfserr;
        ADJUST_ARGS();
        return nfserr;
 }
 
 static __be32
-nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
                      struct nfsd4_sequence *seq)
 {
        __be32 *p;
@@ -3399,8 +3425,8 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
        return 0;
 }
 
-__be32
-nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
+static __be32
+nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
                          struct nfsd4_test_stateid *test_stateid)
 {
        struct nfsd4_test_stateid_id *stateid, *next;
@@ -3503,7 +3529,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
  * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
  * will be at least a page and will therefore hold the xdr_buf head.
  */
-int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
 {
        struct xdr_buf *xb = &resp->rqstp->rq_res;
        struct nfsd4_session *session = NULL;
index 2c53be6d357957332478ed1d55d62c85cf9f26cc..c55298ed5772577e5afe3bd613c3e5a0df3b69dd 100644 (file)
@@ -127,7 +127,17 @@ static const struct file_operations transaction_ops = {
 
 static int exports_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &nfs_exports_op);
+       int err;
+       struct seq_file *seq;
+       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+
+       err = seq_open(file, &nfs_exports_op);
+       if (err)
+               return err;
+
+       seq = file->private_data;
+       seq->private = nn->svc_export_cache;
+       return 0;
 }
 
 static const struct file_operations exports_operations = {
@@ -345,7 +355,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
        if (!dom)
                return -ENOMEM;
 
-       len = exp_rootfh(dom, path, &fh,  maxsize);
+       len = exp_rootfh(&init_net, dom, path, &fh,  maxsize);
        auth_domain_put(dom);
        if (len)
                return len;
@@ -651,6 +661,7 @@ static ssize_t __write_ports_addfd(char *buf)
 {
        char *mesg = buf;
        int fd, err;
+       struct net *net = &init_net;
 
        err = get_int(&mesg, &fd);
        if (err != 0 || fd < 0)
@@ -662,6 +673,8 @@ static ssize_t __write_ports_addfd(char *buf)
 
        err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
        if (err < 0) {
+               if (nfsd_serv->sv_nrthreads == 1)
+                       svc_shutdown_net(nfsd_serv, net);
                svc_destroy(nfsd_serv);
                return err;
        }
@@ -699,6 +712,7 @@ static ssize_t __write_ports_addxprt(char *buf)
        char transport[16];
        struct svc_xprt *xprt;
        int port, err;
+       struct net *net = &init_net;
 
        if (sscanf(buf, "%15s %4u", transport, &port) != 2)
                return -EINVAL;
@@ -710,12 +724,12 @@ static ssize_t __write_ports_addxprt(char *buf)
        if (err != 0)
                return err;
 
-       err = svc_create_xprt(nfsd_serv, transport, &init_net,
+       err = svc_create_xprt(nfsd_serv, transport, net,
                                PF_INET, port, SVC_SOCK_ANONYMOUS);
        if (err < 0)
                goto out_err;
 
-       err = svc_create_xprt(nfsd_serv, transport, &init_net,
+       err = svc_create_xprt(nfsd_serv, transport, net,
                                PF_INET6, port, SVC_SOCK_ANONYMOUS);
        if (err < 0 && err != -EAFNOSUPPORT)
                goto out_close;
@@ -724,12 +738,14 @@ static ssize_t __write_ports_addxprt(char *buf)
        nfsd_serv->sv_nrthreads--;
        return 0;
 out_close:
-       xprt = svc_find_xprt(nfsd_serv, transport, &init_net, PF_INET, port);
+       xprt = svc_find_xprt(nfsd_serv, transport, net, PF_INET, port);
        if (xprt != NULL) {
                svc_close_xprt(xprt);
                svc_xprt_put(xprt);
        }
 out_err:
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
        return err;
 }
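
Editor's note: in the error paths above, sv_nrthreads doubles as a reference count on nfsd_serv; when the caller holds the last reference, the new code tears down the per-net transports with svc_shutdown_net() before svc_destroy(). A toy refcounted-object sketch of that ordering (this is not the sunrpc API, just the shape of the release rule):

#include <stdio.h>

struct serv {
        int refs;                       /* plays the role of sv_nrthreads */
};

static void shutdown_net(struct serv *s)
{
        (void)s;
        puts("close per-net transports");
}

static void destroy(struct serv *s)
{
        if (--s->refs == 0)
                puts("free the service");
}

/* Release one reference; the last holder must shut the nets down first. */
static void put_serv(struct serv *s)
{
        if (s->refs == 1)
                shutdown_net(s);
        destroy(s);
}

int main(void)
{
        struct serv s = { .refs = 2 };

        put_serv(&s);   /* only drops the reference */
        put_serv(&s);   /* last user: shutdown, then free */
        return 0;
}
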
@@ -1127,7 +1143,34 @@ static int create_proc_exports_entry(void)
 #endif
 
 int nfsd_net_id;
+
+static __net_init int nfsd_init_net(struct net *net)
+{
+       int retval;
+
+       retval = nfsd_export_init(net);
+       if (retval)
+               goto out_export_error;
+       retval = nfsd_idmap_init(net);
+       if (retval)
+               goto out_idmap_error;
+       return 0;
+
+out_idmap_error:
+       nfsd_export_shutdown(net);
+out_export_error:
+       return retval;
+}
+
+static __net_exit void nfsd_exit_net(struct net *net)
+{
+       nfsd_idmap_shutdown(net);
+       nfsd_export_shutdown(net);
+}
+
 static struct pernet_operations nfsd_net_ops = {
+       .init = nfsd_init_net,
+       .exit = nfsd_exit_net,
        .id   = &nfsd_net_id,
        .size = sizeof(struct nfsd_net),
 };
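
Editor's note: moving export and idmap setup into nfsd_init_net()/nfsd_exit_net() ties their lifetime to each network namespace through the pernet_operations table above; the table itself is presumably registered (e.g. via register_pernet_subsys()) elsewhere in the nfsd init code, which is not visible in this hunk. A userspace model of the dispatch, showing why init_nfsd()/exit_nfsd() no longer need global export/idmap calls:

#include <stdio.h>

struct net { int id; };

/* Userspace stand-in for struct pernet_operations: per-namespace ctor/dtor. */
struct pernet_operations {
        int  (*init)(struct net *net);
        void (*exit)(struct net *net);
};

static int nfsd_init_net(struct net *net)
{
        printf("net %d: export + idmap caches ready\n", net->id);
        return 0;
}

static void nfsd_exit_net(struct net *net)
{
        printf("net %d: export + idmap caches torn down\n", net->id);
}

static struct pernet_operations nfsd_net_ops = {
        .init = nfsd_init_net,
        .exit = nfsd_exit_net,
};

int main(void)
{
        struct net a = { 1 }, b = { 2 };

        /* The core would invoke .init for every namespace that comes up... */
        nfsd_net_ops.init(&a);
        nfsd_net_ops.init(&b);
        /* ...and .exit as each goes away, so no single global shutdown is needed. */
        nfsd_net_ops.exit(&b);
        nfsd_net_ops.exit(&a);
        return 0;
}
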
@@ -1154,16 +1197,10 @@ static int __init init_nfsd(void)
        retval = nfsd_reply_cache_init();
        if (retval)
                goto out_free_stat;
-       retval = nfsd_export_init();
-       if (retval)
-               goto out_free_cache;
        nfsd_lockd_init();      /* lockd->nfsd callbacks */
-       retval = nfsd_idmap_init();
-       if (retval)
-               goto out_free_lockd;
        retval = create_proc_exports_entry();
        if (retval)
-               goto out_free_idmap;
+               goto out_free_lockd;
        retval = register_filesystem(&nfsd_fs_type);
        if (retval)
                goto out_free_all;
@@ -1171,12 +1208,8 @@ static int __init init_nfsd(void)
 out_free_all:
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
-out_free_idmap:
-       nfsd_idmap_shutdown();
 out_free_lockd:
        nfsd_lockd_shutdown();
-       nfsd_export_shutdown();
-out_free_cache:
        nfsd_reply_cache_shutdown();
 out_free_stat:
        nfsd_stat_shutdown();
@@ -1192,13 +1225,11 @@ out_unregister_notifier:
 
 static void __exit exit_nfsd(void)
 {
-       nfsd_export_shutdown();
        nfsd_reply_cache_shutdown();
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
        nfsd_stat_shutdown();
        nfsd_lockd_shutdown();
-       nfsd_idmap_shutdown();
        nfsd4_free_slabs();
        nfsd_fault_inject_cleanup();
        unregister_filesystem(&nfsd_fs_type);
index 68454e75fce967b95bbb01158def83733de60976..cc793005a87cb4b5a79b7074c4861ca651085d1c 100644 (file)
@@ -636,7 +636,7 @@ fh_put(struct svc_fh *fhp)
 #endif
        }
        if (exp) {
-               cache_put(&exp->h, &svc_export_cache);
+               exp_put(exp);
                fhp->fh_export = NULL;
        }
        return;
index 28dfad39f0c50a626384c4363955e2b9d7e3212f..ee709fc8f58bc0b62a3f7ca64104630fe803b6d0 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/fs_struct.h>
 #include <linux/swap.h>
+#include <linux/nsproxy.h>
 
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svcsock.h>
@@ -220,7 +221,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
        ret = nfsd_init_socks(port);
        if (ret)
                goto out_racache;
-       ret = lockd_up();
+       ret = lockd_up(&init_net);
        if (ret)
                goto out_racache;
        ret = nfs4_state_start();
@@ -229,7 +230,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
        nfsd_up = true;
        return 0;
 out_lockd:
-       lockd_down();
+       lockd_down(&init_net);
 out_racache:
        nfsd_racache_shutdown();
        return ret;
@@ -246,7 +247,7 @@ static void nfsd_shutdown(void)
        if (!nfsd_up)
                return;
        nfs4_state_shutdown();
-       lockd_down();
+       lockd_down(&init_net);
        nfsd_racache_shutdown();
        nfsd_up = false;
 }
@@ -261,7 +262,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 
        printk(KERN_WARNING "nfsd: last server has exited, flushing export "
                            "cache\n");
-       nfsd_export_flush();
+       nfsd_export_flush(net);
 }
 
 void nfsd_reset_versions(void)
@@ -330,6 +331,8 @@ static int nfsd_get_default_max_blksize(void)
 
 int nfsd_create_serv(void)
 {
+       int error;
+
        WARN_ON(!mutex_is_locked(&nfsd_mutex));
        if (nfsd_serv) {
                svc_get(nfsd_serv);
@@ -343,6 +346,12 @@ int nfsd_create_serv(void)
        if (nfsd_serv == NULL)
                return -ENOMEM;
 
+       error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
+       if (error < 0) {
+               svc_destroy(nfsd_serv);
+               return error;
+       }
+
        set_max_drc();
        do_gettimeofday(&nfssvc_boot);          /* record boot time */
        return 0;
@@ -373,6 +382,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
        int i = 0;
        int tot = 0;
        int err = 0;
+       struct net *net = &init_net;
 
        WARN_ON(!mutex_is_locked(&nfsd_mutex));
 
@@ -417,6 +427,9 @@ int nfsd_set_nrthreads(int n, int *nthreads)
                if (err)
                        break;
        }
+
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
 
        return err;
@@ -432,6 +445,7 @@ nfsd_svc(unsigned short port, int nrservs)
 {
        int     error;
        bool    nfsd_up_before;
+       struct net *net = &init_net;
 
        mutex_lock(&nfsd_mutex);
        dprintk("nfsd: creating service\n");
@@ -464,6 +478,8 @@ out_shutdown:
        if (error < 0 && !nfsd_up_before)
                nfsd_shutdown();
 out_destroy:
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);         /* Release server */
 out:
        mutex_unlock(&nfsd_mutex);
@@ -547,6 +563,9 @@ nfsd(void *vrqstp)
        nfsdstats.th_cnt --;
 
 out:
+       if (rqstp->rq_server->sv_nrthreads == 1)
+               svc_shutdown_net(rqstp->rq_server, &init_net);
+
        /* Release the thread */
        svc_exit_thread(rqstp);
 
@@ -659,8 +678,12 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
 int nfsd_pool_stats_release(struct inode *inode, struct file *file)
 {
        int ret = seq_release(inode, file);
+       struct net *net = &init_net;
+
        mutex_lock(&nfsd_mutex);
        /* this function really, really should have been called svc_put() */
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
        mutex_unlock(&nfsd_mutex);
        return ret;
index 89ab137d379a3f6756b5b5616083862e8f22d88f..849091e16ea6afd43e4ddd2dbd17962fdd87ad85 100644 (file)
@@ -232,7 +232,6 @@ struct nfs4_client {
        time_t                  cl_time;        /* time of last lease renewal */
        struct sockaddr_storage cl_addr;        /* client ipaddress */
        u32                     cl_flavor;      /* setclientid pseudoflavor */
-       char                    *cl_principal;  /* setclientid principal name */
        struct svc_cred         cl_cred;        /* setclientid principal */
        clientid_t              cl_clientid;    /* generated by server */
        nfs4_verifier           cl_confirm;     /* generated by server */
index 568666156ea4f59525d67207551ee8c45a3b730e..c8bd9c3be7f747410622fd1172b2c7243886f838 100644 (file)
@@ -2039,7 +2039,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
        if (err)
                goto out;
 
-       offset = vfs_llseek(file, offset, 0);
+       offset = vfs_llseek(file, offset, SEEK_SET);
        if (offset < 0) {
                err = nfserrno((int)offset);
                goto out_close;
index 1b3501598ab5dbb4609ba19e4f7c3322b29f70ba..acd127d4ee821660e71fe1e38ef1c804962f6508 100644 (file)
@@ -60,7 +60,7 @@ struct nfsd4_compound_state {
        __be32                  *datap;
        size_t                  iovlen;
        u32                     minorversion;
-       u32                     status;
+       __be32                  status;
        stateid_t       current_stateid;
        stateid_t       save_stateid;
        /* to indicate current and saved state id presents */
@@ -364,7 +364,7 @@ struct nfsd4_test_stateid_id {
 };
 
 struct nfsd4_test_stateid {
-       __be32          ts_num_ids;
+       u32             ts_num_ids;
        struct list_head ts_stateid_list;
 };
 
@@ -549,7 +549,7 @@ int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
                struct nfsd4_compoundargs *);
 int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
                struct nfsd4_compoundres *);
-int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
 void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
 void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
 __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
index 26601529dc17c7ff9f5bc7d4c249a59371cd14c4..62cebc8e1a1fd49ceec5684de547bd8115eedac2 100644 (file)
@@ -37,6 +37,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * This function should be implemented when the writeback function
         * will be implemented.
         */
+       struct the_nilfs *nilfs;
        struct inode *inode = file->f_mapping->host;
        int err;
 
@@ -45,18 +46,21 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                return err;
        mutex_lock(&inode->i_mutex);
 
-       if (!nilfs_inode_dirty(inode)) {
-               mutex_unlock(&inode->i_mutex);
-               return 0;
+       if (nilfs_inode_dirty(inode)) {
+               if (datasync)
+                       err = nilfs_construct_dsync_segment(inode->i_sb, inode,
+                                                           0, LLONG_MAX);
+               else
+                       err = nilfs_construct_segment(inode->i_sb);
        }
-
-       if (datasync)
-               err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
-                                                   LLONG_MAX);
-       else
-               err = nilfs_construct_segment(inode->i_sb);
-
        mutex_unlock(&inode->i_mutex);
+
+       nilfs = inode->i_sb->s_fs_info;
+       if (!err && nilfs_test_opt(nilfs, BARRIER)) {
+               err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               if (err != -EIO)
+                       err = 0;
+       }
        return err;
 }
 
index 08a07a218d26ef40ecc87db0560cdfedd5648e8f..57ceaf33d1773e6e1d2c7be8051b74795d663110 100644 (file)
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
        while (!list_empty(head)) {
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 2a70fce70c65be1151783e3aba3c221e39642ba7..06658caa18bd229ab42e01b876538efaf2c48882 100644 (file)
@@ -692,8 +692,14 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
        if (ret < 0)
                return ret;
 
+       nilfs = inode->i_sb->s_fs_info;
+       if (nilfs_test_opt(nilfs, BARRIER)) {
+               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               if (ret == -EIO)
+                       return ret;
+       }
+
        if (argp != NULL) {
-               nilfs = inode->i_sb->s_fs_info;
                down_read(&nilfs->ns_segctor_sem);
                cno = nilfs->ns_cno - 1;
                up_read(&nilfs->ns_segctor_sem);
index 0bb2c2010b9512ba5fd971fdbdc34eba2abab886..b72847988b78d96d99b7571d17fea769e463c6b0 100644 (file)
@@ -508,31 +508,29 @@ static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
        return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
 }
 
-static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
-                          int connectable)
+static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+                          struct inode *parent)
 {
        struct nilfs_fid *fid = (struct nilfs_fid *)fh;
-       struct inode *inode = dentry->d_inode;
        struct nilfs_root *root = NILFS_I(inode)->i_root;
        int type;
 
-       if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
-           (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
+       if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
+               *lenp = NILFS_FID_SIZE_CONNECTABLE;
+               return 255;
+       }
+       if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
+               *lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
                return 255;
+       }
 
        fid->cno = root->cno;
        fid->ino = inode->i_ino;
        fid->gen = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                fid->parent_ino = parent->i_ino;
                fid->parent_gen = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
-
                type = FILEID_NILFS_WITH_PARENT;
                *lenp = NILFS_FID_SIZE_CONNECTABLE;
        } else {
index 0e72ad6f22aacfe7c30776eaae2ecdef7f22aba3..88e11fb346b6d0fd6e81fb03994fe18cac514bb7 100644 (file)
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
                        continue;
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index a39edc41becc29e76c67b366de2032ad3314268a..e2ce79ef48c467d02f18c0c9fd4ad409c8ca0b5e 100644 (file)
@@ -30,7 +30,7 @@ config NLS_DEFAULT
          cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
          iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
          iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
-         koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8.
+         koi8-r, koi8-ru, koi8-u, sjis, tis-620, macroman, utf8.
          If you specify a wrong value, it will use the built-in NLS;
          compatible with iso8859-1.
 
@@ -452,6 +452,161 @@ config NLS_KOI8_U
          input/output character sets. Say Y here for the preferred Ukrainian
          (koi8-u) and Belarusian (koi8-ru) character sets.
 
+config NLS_MAC_ROMAN
+       tristate "Codepage macroman"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
+         more countries here].
+
+         If unsure, say Y.
+
+config NLS_MAC_CELTIC
+       tristate "Codepage macceltic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Celtic.
+
+         If unsure, say Y.
+
+config NLS_MAC_CENTEURO
+       tristate "Codepage maccenteuro"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Central Europe.
+
+         If unsure, say Y.
+
+config NLS_MAC_CROATIAN
+       tristate "Codepage maccroatian"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Croatian.
+
+         If unsure, say Y.
+
+config NLS_MAC_CYRILLIC
+       tristate "Codepage maccyrillic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Cyrillic.
+
+         If unsure, say Y.
+
+config NLS_MAC_GAELIC
+       tristate "Codepage macgaelic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Gaelic.
+
+         If unsure, say Y.
+
+config NLS_MAC_GREEK
+       tristate "Codepage macgreek"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Greek.
+
+         If unsure, say Y.
+
+config NLS_MAC_ICELAND
+       tristate "Codepage maciceland"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Iceland.
+
+         If unsure, say Y.
+
+config NLS_MAC_INUIT
+       tristate "Codepage macinuit"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Inuit.
+
+         If unsure, say Y.
+
+config NLS_MAC_ROMANIAN
+       tristate "Codepage macromanian"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Romanian.
+
+         If unsure, say Y.
+
+config NLS_MAC_TURKISH
+       tristate "Codepage macturkish"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Turkish.
+
+         If unsure, say Y.
+
 config NLS_UTF8
        tristate "NLS UTF-8"
        help
index f499dd7c3905bfcee723ab86bf231b13dc27300a..8ae37c1b524995d49a8b0a0ba38362ebfd749f47 100644 (file)
@@ -42,3 +42,14 @@ obj-$(CONFIG_NLS_ISO8859_15) += nls_iso8859-15.o
 obj-$(CONFIG_NLS_KOI8_R)       += nls_koi8-r.o
 obj-$(CONFIG_NLS_KOI8_U)       += nls_koi8-u.o nls_koi8-ru.o
 obj-$(CONFIG_NLS_UTF8)         += nls_utf8.o
+obj-$(CONFIG_NLS_MAC_CELTIC)    += mac-celtic.o
+obj-$(CONFIG_NLS_MAC_CENTEURO)  += mac-centeuro.o
+obj-$(CONFIG_NLS_MAC_CROATIAN)  += mac-croatian.o
+obj-$(CONFIG_NLS_MAC_CYRILLIC)  += mac-cyrillic.o
+obj-$(CONFIG_NLS_MAC_GAELIC)    += mac-gaelic.o
+obj-$(CONFIG_NLS_MAC_GREEK)     += mac-greek.o
+obj-$(CONFIG_NLS_MAC_ICELAND)   += mac-iceland.o
+obj-$(CONFIG_NLS_MAC_INUIT)     += mac-inuit.o
+obj-$(CONFIG_NLS_MAC_ROMANIAN)  += mac-romanian.o
+obj-$(CONFIG_NLS_MAC_ROMAN)     += mac-roman.o
+obj-$(CONFIG_NLS_MAC_TURKISH)   += mac-turkish.o
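
Editor's note: each of the new fs/nls/mac-*.c files (the first of which begins below) carries two lookup directions: a 256-entry charset2uni[] table for codepage byte to Unicode, and per-page pageXX[] tables for Unicode back to a codepage byte, where 0x00 outside the ASCII range means "no mapping". A minimal userspace sketch of how such tables are consulted, using a two-entry toy table rather than the full macceltic data; the helper names mimic but are not the kernel's nls_table callbacks:

#include <stdio.h>
#include <stdint.h>

typedef uint16_t wchar16;               /* stands in for the kernel's wchar_t */

/* Byte -> Unicode: index directly by the codepage byte. */
static const wchar16 charset2uni[256] = {
        [0x80] = 0x00c4,                /* LATIN CAPITAL LETTER A WITH DIAERESIS */
        [0xa3] = 0x00a3,                /* POUND SIGN */
};

/* Unicode -> byte: one 256-entry page per high byte of the code point. */
static const unsigned char page00[256] = {
        [0xc4] = 0x80,
        [0xa3] = 0xa3,
};
static const unsigned char *const uni2charset[256] = { [0x00] = page00 };

static int uni2char(wchar16 uni, unsigned char *out)
{
        const unsigned char *page = uni2charset[uni >> 8];
        unsigned char c = page ? page[uni & 0xff] : 0;

        if (!c && uni >= 0x80)          /* 0 means "no mapping" outside ASCII */
                return -1;
        *out = c ? c : (unsigned char)uni;  /* toy ASCII passthrough for the sketch */
        return 1;
}

int main(void)
{
        unsigned char c;

        printf("0x80 -> U+%04X\n", charset2uni[0x80]);
        if (uni2char(0x00c4, &c) > 0)
                printf("U+00C4 -> 0x%02x\n", c);
        return 0;
}
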
diff --git a/fs/nls/mac-celtic.c b/fs/nls/mac-celtic.c
new file mode 100644 (file)
index 0000000..634a8b7
--- /dev/null
+++ b/fs/nls/mac-celtic.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-celtic.c
+ *
+ * Charset macceltic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x0176, 0x0177,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x1ef2, 0x1ef3,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x2663, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x00dd, 0x00fd,
+       0x0174, 0x0175, 0x1e84, 0x1e85,
+       0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0x00, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+       page20, page21, page22, NULL,   NULL,   page25, page26, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macceltic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macceltic(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macceltic(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macceltic)
+module_exit(exit_nls_macceltic)
+
+MODULE_LICENSE("Dual BSD/GPL");
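The uni2char()/char2uni() pair added in each of these tables performs the same two-level lookup: the high byte of the Unicode code point selects one of the page_uni2charset pages, the low byte indexes into that page, and a NULL page or a zero entry means there is no exact mapping. A minimal standalone sketch of that lookup, assuming the mac-celtic tables above are in scope (the helper name is illustrative only, not part of the patch):

static int macceltic_encode(wchar_t uni, unsigned char *out)
{
        /* high byte picks the page, low byte the slot within it */
        const unsigned char *page = page_uni2charset[(uni >> 8) & 0xff];
        unsigned char b;

        if (!page)
                return -EINVAL;         /* whole Unicode page is unmapped */
        b = page[uni & 0xff];
        if (!b)
                return -EINVAL;         /* no exact mapping for this code point */
        *out = b;
        return 1;
}

For example, the tables above give page00[0xe9] == 0x8e and page20[0x22] == 0xa5, so U+00E9 (e with acute) encodes to byte 0x8e and U+2022 (bullet) to byte 0xa5 in macceltic.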
diff --git a/fs/nls/mac-centeuro.c b/fs/nls/mac-centeuro.c
new file mode 100644 (file)
index 0000000..979e626
--- /dev/null
+++ b/fs/nls/mac-centeuro.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-centeuro.c
+ *
+ * Charset maccenteuro translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x0100, 0x0101, 0x00c9,
+       0x0104, 0x00d6, 0x00dc, 0x00e1,
+       0x0105, 0x010c, 0x00e4, 0x010d,
+       0x0106, 0x0107, 0x00e9, 0x0179,
+       /* 0x90 */
+       0x017a, 0x010e, 0x00ed, 0x010f,
+       0x0112, 0x0113, 0x0116, 0x00f3,
+       0x0117, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x011a, 0x011b, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x0118, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x0119,
+       0x00a8, 0x2260, 0x0123, 0x012e,
+       /* 0xb0 */
+       0x012f, 0x012a, 0x2264, 0x2265,
+       0x012b, 0x0136, 0x2202, 0x2211,
+       0x0142, 0x013b, 0x013c, 0x013d,
+       0x013e, 0x0139, 0x013a, 0x0145,
+       /* 0xc0 */
+       0x0146, 0x0143, 0x00ac, 0x221a,
+       0x0144, 0x0147, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x0148,
+       0x0150, 0x00d5, 0x0151, 0x014c,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x014d, 0x0154, 0x0155, 0x0158,
+       0x2039, 0x203a, 0x0159, 0x0156,
+       /* 0xe0 */
+       0x0157, 0x0160, 0x201a, 0x201e,
+       0x0161, 0x015a, 0x015b, 0x00c1,
+       0x0164, 0x0165, 0x00cd, 0x017d,
+       0x017e, 0x016a, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x016b, 0x016e, 0x00da, 0x016f,
+       0x0170, 0x0171, 0x0172, 0x0173,
+       0x00dd, 0x00fd, 0x0137, 0x017b,
+       0x0141, 0x017c, 0x0122, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */
+       0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */
+       0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */
+       0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */
+       0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */
+       0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */
+       0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */
+       0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */
+       0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "maccenteuro",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maccenteuro(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maccenteuro(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maccenteuro)
+module_exit(exit_nls_maccenteuro)
+
+MODULE_LICENSE("Dual BSD/GPL");
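Since the Unicode-to-charset direction carries only exact mappings, a populated byte in the maccenteuro table should survive a round trip through both directions; the tables above give charset2uni[0x8e] == 0x00e9 and page00[0xe9] == 0x8e, for instance. A rough self-check along those lines, assuming the maccenteuro tables above are in scope (illustration only, not part of the patch):

static void maccenteuro_check_roundtrip(void)
{
        int b;

        for (b = 0x80; b <= 0xff; b++) {
                wchar_t uni = charset2uni[b];
                const unsigned char *page = page_uni2charset[(uni >> 8) & 0xff];

                /* e.g. b == 0x8e gives uni == 0x00e9, and page00[0xe9] == 0x8e */
                if (!page || page[uni & 0xff] != b)
                        printk(KERN_DEBUG "maccenteuro: 0x%02x does not round-trip\n", b);
        }
}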
diff --git a/fs/nls/mac-croatian.c b/fs/nls/mac-croatian.c
new file mode 100644 (file)
index 0000000..dd3f675
--- /dev/null
+++ b/fs/nls/mac-croatian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-croatian.c
+ *
+ * Charset maccroatian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x0160, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x017d, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x2206, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x0161, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x017e, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x0106, 0x00ab,
+       0x010c, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x0110, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0xf8ff, 0x00a9, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x00c6, 0x00bb,
+       /* 0xe0 */
+       0x2013, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x0107, 0x00c1,
+       0x010d, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x0111, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x03c0, 0x00cb, 0x02da,
+       0x00b8, 0x00ca, 0x00e6, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xd9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xdf, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xde, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xfd, 0xfa, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xfe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+       0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0xfb, 0x00, 0xf7, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xe0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xb4, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "maccroatian",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maccroatian(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maccroatian(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maccroatian)
+module_exit(exit_nls_maccroatian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-cyrillic.c b/fs/nls/mac-cyrillic.c
new file mode 100644
index 0000000..1112c84
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-cyrillic.c
+ *
+ * Charset maccyrillic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x0410, 0x0411, 0x0412, 0x0413,
+       0x0414, 0x0415, 0x0416, 0x0417,
+       0x0418, 0x0419, 0x041a, 0x041b,
+       0x041c, 0x041d, 0x041e, 0x041f,
+       /* 0x90 */
+       0x0420, 0x0421, 0x0422, 0x0423,
+       0x0424, 0x0425, 0x0426, 0x0427,
+       0x0428, 0x0429, 0x042a, 0x042b,
+       0x042c, 0x042d, 0x042e, 0x042f,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x0490, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x0406,
+       0x00ae, 0x00a9, 0x2122, 0x0402,
+       0x0452, 0x2260, 0x0403, 0x0453,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x0456, 0x00b5, 0x0491, 0x0408,
+       0x0404, 0x0454, 0x0407, 0x0457,
+       0x0409, 0x0459, 0x040a, 0x045a,
+       /* 0xc0 */
+       0x0458, 0x0405, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x040b,
+       0x045b, 0x040c, 0x045c, 0x0455,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x201e,
+       0x040e, 0x045e, 0x040f, 0x045f,
+       0x2116, 0x0401, 0x0451, 0x044f,
+       /* 0xe0 */
+       0x0430, 0x0431, 0x0432, 0x0433,
+       0x0434, 0x0435, 0x0436, 0x0437,
+       0x0438, 0x0439, 0x043a, 0x043b,
+       0x043c, 0x043d, 0x043e, 0x043f,
+       /* 0xf0 */
+       0x0440, 0x0441, 0x0442, 0x0443,
+       0x0444, 0x0445, 0x0446, 0x0447,
+       0x0448, 0x0449, 0x044a, 0x044b,
+       0x044c, 0x044d, 0x044e, 0x20ac,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page04[256] = {
+       0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */
+       0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */
+       0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
+       0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
+       0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
+       0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
+       0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
+       0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
+       0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */
+       0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */
+       0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   NULL,   page04, NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
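+/*
+ * uni2char() splits a Unicode code point into a page index (high byte)
+ * and an offset (low byte); a non-NULL page with a non-zero entry
+ * yields the maccyrillic byte, anything else is unmappable.  char2uni()
+ * is a single lookup in charset2uni[], with a result of U+0000 treated
+ * as "no mapping".
+ */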
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "maccyrillic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maccyrillic(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maccyrillic(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maccyrillic)
+module_exit(exit_nls_maccyrillic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-gaelic.c b/fs/nls/mac-gaelic.c
new file mode 100644 (file)
index 0000000..2de9158
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * linux/fs/nls/mac-gaelic.c
+ *
+ * Charset macgaelic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
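+
+/*
+ * The table is registered under the name "macgaelic"; a filesystem
+ * mounted with an option such as iocharset=macgaelic typically picks it
+ * up via load_nls() and then uses the uni2char()/char2uni() helpers
+ * defined below.
+ */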
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x1e02, 0x00b1, 0x2264, 0x2265,
+       0x1e03, 0x010a, 0x010b, 0x1e0a,
+       0x1e0b, 0x1e1e, 0x1e1f, 0x0120,
+       0x0121, 0x1e40, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x1e41, 0x1e56, 0x1e57, 0x027c,
+       0x0192, 0x017f, 0x1e60, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x1e61, 0x1e9b,
+       0x00ff, 0x0178, 0x1e6a, 0x20ac,
+       0x2039, 0x203a, 0x0176, 0x0177,
+       /* 0xe0 */
+       0x1e6b, 0x00b7, 0x1ef2, 0x1ef3,
+       0x204a, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x2663, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x00dd, 0x00fd,
+       0x0174, 0x0175, 0x1e84, 0x1e85,
+       0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0x00, 0xc7, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0x00, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0x00, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0xb5, 0xb6, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0xbb, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+       0x00, 0x00, 0xb0, 0xb4, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0xb7, 0xb8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xba, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0xbd, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xc2, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   page26, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
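+/*
+ * Example round trip: macgaelic byte 0xb0 maps to U+1E02 (LATIN CAPITAL
+ * LETTER B WITH DOT ABOVE) via charset2uni[0xb0], and uni2char() finds
+ * the same byte again through page_uni2charset[0x1e][0x02] == 0xb0.
+ */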
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macgaelic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macgaelic(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macgaelic(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macgaelic)
+module_exit(exit_nls_macgaelic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-greek.c b/fs/nls/mac-greek.c
new file mode 100644 (file)
index 0000000..a863100
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-greek.c
+ *
+ * Charset macgreek translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00b9, 0x00b2, 0x00c9,
+       0x00b3, 0x00d6, 0x00dc, 0x0385,
+       0x00e0, 0x00e2, 0x00e4, 0x0384,
+       0x00a8, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00a3, 0x2122,
+       0x00ee, 0x00ef, 0x2022, 0x00bd,
+       0x2030, 0x00f4, 0x00f6, 0x00a6,
+       0x20ac, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x0393, 0x0394, 0x0398,
+       0x039b, 0x039e, 0x03a0, 0x00df,
+       0x00ae, 0x00a9, 0x03a3, 0x03aa,
+       0x00a7, 0x2260, 0x00b0, 0x00b7,
+       /* 0xb0 */
+       0x0391, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x0392, 0x0395, 0x0396,
+       0x0397, 0x0399, 0x039a, 0x039c,
+       0x03a6, 0x03ab, 0x03a8, 0x03a9,
+       /* 0xc0 */
+       0x03ac, 0x039d, 0x00ac, 0x039f,
+       0x03a1, 0x2248, 0x03a4, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x03a5,
+       0x03a7, 0x0386, 0x0388, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2015, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x0389,
+       0x038a, 0x038c, 0x038e, 0x03ad,
+       0x03ae, 0x03af, 0x03cc, 0x038f,
+       /* 0xe0 */
+       0x03cd, 0x03b1, 0x03b2, 0x03c8,
+       0x03b4, 0x03b5, 0x03c6, 0x03b3,
+       0x03b7, 0x03b9, 0x03be, 0x03ba,
+       0x03bb, 0x03bc, 0x03bd, 0x03bf,
+       /* 0xf0 */
+       0x03c0, 0x03ce, 0x03c1, 0x03c3,
+       0x03c4, 0x03b8, 0x03c9, 0x03c2,
+       0x03c7, 0x03c5, 0x03b6, 0x03ca,
+       0x03cb, 0x0390, 0x03b0, 0x00ad,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0x92, 0x00, 0xb4, 0x9b, 0xac, /* 0xa0-0xa7 */
+       0x8c, 0xa9, 0x00, 0xc7, 0xc2, 0xff, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xae, 0xb1, 0x82, 0x84, 0x00, 0x00, 0x00, 0xaf, /* 0xb0-0xb7 */
+       0x00, 0x81, 0x00, 0xc8, 0x00, 0x97, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x00, 0x89, 0x00, 0x8a, 0x00, 0x00, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x00, 0x00, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x9d, 0x00, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x8b, 0x87, 0xcd, 0x00, /* 0x80-0x87 */
+       0xce, 0xd7, 0xd8, 0x00, 0xd9, 0x00, 0xda, 0xdf, /* 0x88-0x8f */
+       0xfd, 0xb0, 0xb5, 0xa1, 0xa2, 0xb6, 0xb7, 0xb8, /* 0x90-0x97 */
+       0xa3, 0xb9, 0xba, 0xa4, 0xbb, 0xc1, 0xa5, 0xc3, /* 0x98-0x9f */
+       0xa6, 0xc4, 0x00, 0xaa, 0xc6, 0xcb, 0xbc, 0xcc, /* 0xa0-0xa7 */
+       0xbe, 0xbf, 0xab, 0xbd, 0xc0, 0xdb, 0xdc, 0xdd, /* 0xa8-0xaf */
+       0xfe, 0xe1, 0xe2, 0xe7, 0xe4, 0xe5, 0xfa, 0xe8, /* 0xb0-0xb7 */
+       0xf5, 0xe9, 0xeb, 0xec, 0xed, 0xee, 0xea, 0xef, /* 0xb8-0xbf */
+       0xf0, 0xf2, 0xf7, 0xf3, 0xf4, 0xf9, 0xe6, 0xf8, /* 0xc0-0xc7 */
+       0xe3, 0xf6, 0xfb, 0xfc, 0xde, 0xe0, 0xf1, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macgreek",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macgreek(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macgreek(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macgreek)
+module_exit(exit_nls_macgreek)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-iceland.c b/fs/nls/mac-iceland.c
new file mode 100644 (file)
index 0000000..babe299
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-iceland.c
+ *
+ * Charset maciceland translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x00dd, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x00d0, 0x00f0, 0x00de, 0x00fe,
+       /* 0xe0 */
+       0x00fd, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0xdc, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xa0, 0xde, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0xdd, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xe0, 0xdf, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "maciceland",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maciceland(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maciceland(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maciceland)
+module_exit(exit_nls_maciceland)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-inuit.c b/fs/nls/mac-inuit.c
new file mode 100644 (file)
index 0000000..312364f
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-inuit.c
+ *
+ * Charset macinuit translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x1403, 0x1404, 0x1405, 0x1406,
+       0x140a, 0x140b, 0x1431, 0x1432,
+       0x1433, 0x1434, 0x1438, 0x1439,
+       0x1449, 0x144e, 0x144f, 0x1450,
+       /* 0x90 */
+       0x1451, 0x1455, 0x1456, 0x1466,
+       0x146d, 0x146e, 0x146f, 0x1470,
+       0x1472, 0x1473, 0x1483, 0x148b,
+       0x148c, 0x148d, 0x148e, 0x1490,
+       /* 0xa0 */
+       0x1491, 0x00b0, 0x14a1, 0x14a5,
+       0x14a6, 0x2022, 0x00b6, 0x14a7,
+       0x00ae, 0x00a9, 0x2122, 0x14a8,
+       0x14aa, 0x14ab, 0x14bb, 0x14c2,
+       /* 0xb0 */
+       0x14c3, 0x14c4, 0x14c5, 0x14c7,
+       0x14c8, 0x14d0, 0x14ef, 0x14f0,
+       0x14f1, 0x14f2, 0x14f4, 0x14f5,
+       0x1505, 0x14d5, 0x14d6, 0x14d7,
+       /* 0xc0 */
+       0x14d8, 0x14da, 0x14db, 0x14ea,
+       0x1528, 0x1529, 0x152a, 0x152b,
+       0x152d, 0x2026, 0x00a0, 0x152e,
+       0x153e, 0x1555, 0x1556, 0x1557,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x1558, 0x1559,
+       0x155a, 0x155d, 0x1546, 0x1547,
+       0x1548, 0x1549, 0x154b, 0x154c,
+       /* 0xe0 */
+       0x1550, 0x157f, 0x1580, 0x1581,
+       0x1582, 0x1583, 0x1584, 0x1585,
+       0x158f, 0x1590, 0x1591, 0x1592,
+       0x1593, 0x1594, 0x1595, 0x1671,
+       /* 0xf0 */
+       0x1672, 0x1673, 0x1674, 0x1675,
+       0x1676, 0x1596, 0x15a0, 0x15a1,
+       0x15a2, 0x15a3, 0x15a4, 0x15a5,
+       0x15a6, 0x157c, 0x0141, 0x0142,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page14[256] = {
+       0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
+       0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
+       0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
+       0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
+       0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
+       0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
+       0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
+       0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page15[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
+       0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
+       0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
+       0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
+       0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page16[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   page14, page15, page16, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macinuit",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macinuit(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macinuit(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macinuit)
+module_exit(exit_nls_macinuit)
+
+MODULE_LICENSE("Dual BSD/GPL");
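
The uni2char()/char2uni() pair above is the whole conversion engine: the high byte of a Unicode code point selects one of the pageXX arrays through page_uni2charset[], the low byte indexes into that page, and a zero entry means "no exact mapping", reported as -EINVAL. The sketch below shows how a filesystem driver would exercise a registered table through the generic <linux/nls.h> API; demo_map_name() and its calling convention are hypothetical illustrations, while load_nls()/unload_nls() and the table ops are the existing interfaces, and error handling is trimmed to the essentials.

    /*
     * Illustrative sketch, not part of the patch: encoding a Unicode name
     * into the "macinuit" charset one code point at a time.
     */
    #include <linux/nls.h>
    #include <linux/errno.h>

    static int demo_map_name(const wchar_t *uni_name, int uni_len,
                             unsigned char *out, int out_max)
    {
            struct nls_table *nls;
            int i, n, written = 0;

            nls = load_nls("macinuit");     /* may trigger request_module("nls_macinuit") */
            if (!nls)
                    return -EINVAL;

            for (i = 0; i < uni_len; i++) {
                    /* two-level lookup: page_uni2charset[high byte][low byte] */
                    n = nls->uni2char(uni_name[i], out + written, out_max - written);
                    if (n < 0) {
                            written = n;    /* -EINVAL (unmapped) or -ENAMETOOLONG (buffer full) */
                            break;
                    }
                    written += n;
            }

            unload_nls(nls);
            return written;
    }

The two-level layout keeps the sparse Unicode coverage compact: only pages that actually contain mappings are emitted, and every other slot of page_uni2charset[] stays NULL.
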
diff --git a/fs/nls/mac-roman.c b/fs/nls/mac-roman.c
new file mode 100644 (file)
index 0000000..53ce080
--- /dev/null
+++ b/fs/nls/mac-roman.c
@@ -0,0 +1,637 @@
+/*
+ * linux/fs/nls/mac-roman.c
+ *
+ * Charset macroman translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0xfb01, 0xfb02,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char pagefb[256] = {
+       0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   pagefb, NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macroman",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macroman(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macroman(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macroman)
+module_exit(exit_nls_macroman)
+
+MODULE_LICENSE("Dual BSD/GPL");
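
For the opposite direction the charset2uni[] array at the top of each file is authoritative: char2uni() looks up a single byte and rejects anything that maps to U+0000. A companion sketch, again with a hypothetical demo_decode_name() wrapper around the real <linux/nls.h> calls, turns a raw Mac Roman byte string back into a wchar_t buffer:

    #include <linux/nls.h>
    #include <linux/errno.h>

    static int demo_decode_name(const unsigned char *raw, int raw_len,
                                wchar_t *uni, int uni_max)
    {
            struct nls_table *nls;
            int i, n, count = 0;

            nls = load_nls("macroman");
            if (!nls)
                    return -EINVAL;

            for (i = 0; i < raw_len && count < uni_max; i += n) {
                    /* returns how many bytes were consumed: one for this single-byte charset */
                    n = nls->char2uni(raw + i, raw_len - i, &uni[count]);
                    if (n < 0) {
                            count = n;      /* unmapped byte, e.g. 0x00 */
                            break;
                    }
                    count++;
            }

            unload_nls(nls);
            return count;
    }
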
diff --git a/fs/nls/mac-romanian.c b/fs/nls/mac-romanian.c
new file mode 100644 (file)
index 0000000..add6f7a
--- /dev/null
+++ b/fs/nls/mac-romanian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-romanian.c
+ *
+ * Charset macromanian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x0102, 0x0218,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x0103, 0x0219,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x021a, 0x021b,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0x00, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0x00, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0xae, 0xbe, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xaf, 0xbf, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macromanian",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macromanian(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macromanian(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macromanian)
+module_exit(exit_nls_macromanian)
+
+MODULE_LICENSE("Dual BSD/GPL");
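[Editorial note, not part of the commit] The reverse direction, char2uni(), needs no paging: the 8-bit charset has at most 256 code points, so it is a single direct index into charset2uni[], with 0x0000 treated as "no mapping" (which is why the helpers above return -EINVAL for it). A companion user-space sketch, again with an invented two-entry excerpt rather than the real table:

/*
 * Minimal sketch of the direct char2uni lookup.  Hypothetical excerpt:
 * byte 0x80 -> U+00C4, byte 0x81 -> U+00C5.
 */
#include <stdio.h>

typedef unsigned short wchar16;

static const wchar16 demo_charset2uni[256] = {
	[0x80] = 0x00c4,
	[0x81] = 0x00c5,
};

/* Same shape as the char2uni() helpers in the patch. */
static int demo_char2uni(unsigned char c, wchar16 *uni)
{
	*uni = demo_charset2uni[c];
	if (*uni == 0x0000)
		return -1;                 /* unmapped byte */
	return 1;
}

int main(void)
{
	wchar16 uni;

	if (demo_char2uni(0x80, &uni) == 1)
		printf("0x80 -> U+%04X\n", uni);    /* prints U+00C4 */
	if (demo_char2uni(0x90, &uni) < 0)
		printf("0x90 is unmapped in this demo excerpt\n");
	return 0;
}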
diff --git a/fs/nls/mac-turkish.c b/fs/nls/mac-turkish.c
new file mode 100644 (file)
index 0000000..dffa96d
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-turkish.c
+ *
+ * Charset macturkish translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x011e, 0x011f,
+       0x0130, 0x0131, 0x015e, 0x015f,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0xf8a0, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xdf, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macturkish",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macturkish(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macturkish(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macturkish)
+module_exit(exit_nls_macturkish)
+
+MODULE_LICENSE("Dual BSD/GPL");
index ccb14d3fc0de99790d282ae17ce984338ca309ab..b39c5c161adb64bff0d33faa64f41d8f4a9942cd 100644 (file)
@@ -123,7 +123,7 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 }
 EXPORT_SYMBOL_GPL(__fsnotify_parent);
 
-static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
+static int send_to_group(struct inode *to_tell,
                         struct fsnotify_mark *inode_mark,
                         struct fsnotify_mark *vfsmount_mark,
                         __u32 mask, void *data,
@@ -168,10 +168,10 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
                        vfsmount_test_mask &= ~inode_mark->ignored_mask;
        }
 
-       pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
+       pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
                 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
                 " data=%p data_is=%d cookie=%d event=%p\n",
-                __func__, group, to_tell, mnt, mask, inode_mark,
+                __func__, group, to_tell, mask, inode_mark,
                 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
                 data_is, cookie, *event);
 
@@ -258,16 +258,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
 
                if (inode_group > vfsmount_group) {
                        /* handle inode */
-                       ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
+                       ret = send_to_group(to_tell, inode_mark, NULL, mask, data,
                                            data_is, cookie, file_name, &event);
                        /* we didn't use the vfsmount_mark */
                        vfsmount_group = NULL;
                } else if (vfsmount_group > inode_group) {
-                       ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
+                       ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, data,
                                            data_is, cookie, file_name, &event);
                        inode_group = NULL;
                } else {
-                       ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
+                       ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
                                            mask, data, data_is, cookie, file_name,
                                            &event);
                }
index 8639169221c7aed21c0bd600ab4ef1a0d8102cb1..7389d2d5e51d257c72f9fb0c1468c38a28b309e4 100644 (file)
@@ -2096,7 +2096,9 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
        err = file_remove_suid(file);
        if (err)
                goto out;
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
        written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
                        count);
 out:
index c7ee03c22226253d970cce94beb11f6353b3e1d0..0725e605465040b6b1e7c5e7744c5243968158c9 100644 (file)
@@ -422,45 +422,46 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                               struct ocfs2_blockcheck_stats *stats)
 {
        int rc = 0;
-       struct ocfs2_block_check check;
+       u32 bc_crc32e;
+       u16 bc_ecc;
        u32 crc, ecc;
 
        ocfs2_blockcheck_inc_check(stats);
 
-       check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-       check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+       bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+       bc_ecc = le16_to_cpu(bc->bc_ecc);
 
        memset(bc, 0, sizeof(struct ocfs2_block_check));
 
        /* Fast path - if the crc32 validates, we're good to go */
        crc = crc32_le(~0, data, blocksize);
-       if (crc == check.bc_crc32e)
+       if (crc == bc_crc32e)
                goto out;
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
             "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
        ecc = ocfs2_hamming_encode_block(data, blocksize);
-       ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc);
+       ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);
 
        /* And check the crc32 again */
        crc = crc32_le(~0, data, blocksize);
-       if (crc == check.bc_crc32e) {
+       if (crc == bc_crc32e) {
                ocfs2_blockcheck_inc_recover(stats);
                goto out;
        }
 
        mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
 
 out:
-       bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-       bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+       bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+       bc->bc_ecc = cpu_to_le16(bc_ecc);
 
        return rc;
 }
@@ -528,7 +529,8 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
                                   struct ocfs2_blockcheck_stats *stats)
 {
        int i, rc = 0;
-       struct ocfs2_block_check check;
+       u32 bc_crc32e;
+       u16 bc_ecc;
        u32 crc, ecc, fix;
 
        BUG_ON(nr < 0);
@@ -538,21 +540,21 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
 
        ocfs2_blockcheck_inc_check(stats);
 
-       check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-       check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+       bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+       bc_ecc = le16_to_cpu(bc->bc_ecc);
 
        memset(bc, 0, sizeof(struct ocfs2_block_check));
 
        /* Fast path - if the crc32 validates, we're good to go */
        for (i = 0, crc = ~0; i < nr; i++)
                crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-       if (crc == check.bc_crc32e)
+       if (crc == bc_crc32e)
                goto out;
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
             "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
        for (i = 0, ecc = 0; i < nr; i++) {
@@ -565,7 +567,7 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
                                                bhs[i]->b_size * 8,
                                                bhs[i]->b_size * 8 * i);
        }
-       fix = ecc ^ check.bc_ecc;
+       fix = ecc ^ bc_ecc;
        for (i = 0; i < nr; i++) {
                /*
                 * Try the fix against each buffer.  It will only affect
@@ -578,19 +580,19 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
        /* And check the crc32 again */
        for (i = 0, crc = ~0; i < nr; i++)
                crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-       if (crc == check.bc_crc32e) {
+       if (crc == bc_crc32e) {
                ocfs2_blockcheck_inc_recover(stats);
                goto out;
        }
 
        mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
 
 out:
-       bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-       bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+       bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+       bc->bc_ecc = cpu_to_le16(bc_ecc);
 
        return rc;
 }
index 3a3ed4bb794b0d6c75e7e321b042b1b4128fbd27..fbec0be6232622ddda0c3ed4ed49c50cc0129386 100644 (file)
@@ -293,7 +293,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
        struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
        char *name;
        struct list_head *iter, *head=NULL;
-       u64 cookie;
+       __be64 cookie;
        u32 flags;
        u8 node;
 
index a5952ceecba5a83147389ad4a1cd24972ee0bfbe..de854cca12a2d23dea5652d3dad38461c7dbde13 100644 (file)
@@ -679,7 +679,7 @@ struct dlm_query_join_packet {
 };
 
 union dlm_query_join_response {
-       u32 intval;
+       __be32 intval;
        struct dlm_query_join_packet packet;
 };
 
@@ -755,8 +755,8 @@ struct dlm_query_region {
 struct dlm_node_info {
        u8 ni_nodenum;
        u8 pad1;
-       u16 ni_ipv4_port;
-       u32 ni_ipv4_address;
+       __be16 ni_ipv4_port;
+       __be32 ni_ipv4_address;
 };
 
 struct dlm_query_nodeinfo {
index 92f2ead0fab6de22fa138cc4410dee6e1544216c..9e89d70df337fc98836e87f90e38887a716843e6 100644 (file)
@@ -818,7 +818,7 @@ static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
        union dlm_query_join_response response;
 
        response.packet = *packet;
-       *wire = cpu_to_be32(response.intval);
+       *wire = be32_to_cpu(response.intval);
 }
 
 static void dlm_query_join_wire_to_packet(u32 wire,
index 81a4cd22f80be84a06eac2b0fbf4348385d76262..4f7795fb5fc0b78a6f58d28f2de69356ff5e197d 100644 (file)
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
        stats->ls_gets++;
        stats->ls_total += ktime_to_ns(kt);
        /* overflow */
-       if (unlikely(stats->ls_gets) == 0) {
+       if (unlikely(stats->ls_gets == 0)) {
                stats->ls_gets++;
                stats->ls_total = ktime_to_ns(kt);
        }
@@ -3932,6 +3932,8 @@ unqueue:
 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres)
 {
+       unsigned long flags;
+
        assert_spin_locked(&lockres->l_lock);
 
        if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
 
        lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&lockres->l_blocked_list)) {
                list_add_tail(&lockres->l_blocked_list,
                              &osb->blocked_lock_list);
                osb->blocked_lock_count++;
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 {
        unsigned long processed;
+       unsigned long flags;
        struct ocfs2_lock_res *lockres;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* grab this early so we know to try again if a state change and
         * wake happens part-way through our work  */
        osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
                osb->blocked_lock_count--;
-               spin_unlock(&osb->dc_task_lock);
+               spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
                BUG_ON(!processed);
                processed--;
 
                ocfs2_process_blocked_lock(osb, lockres);
 
-               spin_lock(&osb->dc_task_lock);
+               spin_lock_irqsave(&osb->dc_task_lock, flags);
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
 {
        int empty = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&osb->blocked_lock_list))
                empty = 1;
 
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        return empty;
 }
 
 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
 {
        int should_wake = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (osb->dc_work_sequence != osb->dc_wake_sequence)
                should_wake = 1;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
        return should_wake;
 }
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
 
 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
 {
-       spin_lock(&osb->dc_task_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* make sure the voting thread gets a swipe at whatever changes
         * the caller may have made to the voting state */
        osb->dc_wake_sequence++;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        wake_up(&osb->dc_event);
 }
index 745db42528d5fd2f875a099177158f347fd361e7..322216a5f0dd1e0f2e178540781b3c6fd263c985 100644 (file)
@@ -177,21 +177,23 @@ bail:
        return parent;
 }
 
-static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
-                          int connectable)
+static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
+                          struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
        int len = *max_len;
        int type = 1;
        u64 blkno;
        u32 generation;
        __le32 *fh = (__force __le32 *) fh_in;
 
+#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
+#error "You go ahead and fix that mess, then.  Somehow"
        trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
                                    dentry->d_name.name,
                                    fh, len, connectable);
+#endif
 
-       if (connectable && (len < 6)) {
+       if (parent && (len < 6)) {
                *max_len = 6;
                type = 255;
                goto bail;
@@ -211,12 +213,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
        fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
        fh[2] = cpu_to_le32(generation);
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                blkno = OCFS2_I(parent)->ip_blkno;
                generation = parent->i_generation;
 
@@ -224,8 +221,6 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
                fh[5] = cpu_to_le32(generation);
 
-               spin_unlock(&dentry->d_lock);
-
                len = 6;
                type = 2;
 
index 2f5b92ef0e533146007b49d21dd705a242125dc5..70b5863a2d64e05cde6474bd387112f3b470b5e7 100644 (file)
@@ -923,8 +923,6 @@ out_unlock:
 
        ocfs2_inode_unlock(inode, 0);
 out:
-       if (ret && ret != -ENXIO)
-               ret = -ENXIO;
        return ret;
 }
 
index 061591a3ab08a673d73544e819e7b7fd19b2689a..7602783d7f41c9a01827f16446da43e036e6a7cb 100644 (file)
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       if (file->f_flags & O_SYNC)
+       if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;
 
        ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
                unaligned_dio = 0;
        }
 
-       if (unaligned_dio)
+       if (unaligned_dio) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
                atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+       }
 
 out:
        if (rw_level != -1)
index 735514ca400f7942268dff8f387c7c029506b859..d89e08a81eda8875fcd59d76c5e1d75827e7d7ac 100644 (file)
@@ -273,11 +273,13 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
        inode->i_gid = le32_to_cpu(fe->i_gid);
 
        /* Fast symlinks will have i_size but no allocated clusters. */
-       if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+       if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
                inode->i_blocks = 0;
-       else
+               inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
+       } else {
                inode->i_blocks = ocfs2_inode_sector_count(inode);
-       inode->i_mapping->a_ops = &ocfs2_aops;
+               inode->i_mapping->a_ops = &ocfs2_aops;
+       }
        inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
        inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
        inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
@@ -331,10 +333,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
                    OCFS2_I(inode)->ip_dir_lock_gen = 1;
                    break;
            case S_IFLNK:
-                   if (ocfs2_inode_is_fast_symlink(inode))
-                       inode->i_op = &ocfs2_fast_symlink_inode_operations;
-                   else
-                       inode->i_op = &ocfs2_symlink_inode_operations;
+                   inode->i_op = &ocfs2_symlink_inode_operations;
                    i_size_write(inode, le64_to_cpu(fe->i_size));
                    break;
            default:
index a1a1bfd652c90d49521ad3ea12a908f9a168c1e9..d96f7f81d8dd3257f49bb02885db296af22881cb 100644 (file)
@@ -864,7 +864,7 @@ int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
                if (status)
                        break;
 
-               reqp = (struct ocfs2_info_request *)(unsigned long)req_addr;
+               reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
                if (!reqp) {
                        status = -EINVAL;
                        goto bail;
@@ -888,9 +888,11 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        struct ocfs2_space_resv sr;
        struct ocfs2_new_group_input input;
        struct reflink_arguments args;
-       const char *old_path, *new_path;
+       const char __user *old_path;
+       const char __user *new_path;
        bool preserve;
        struct ocfs2_info info;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case OCFS2_IOC_GETFLAGS:
@@ -937,17 +939,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
                return ocfs2_group_add(inode, &input);
        case OCFS2_IOC_REFLINK:
-               if (copy_from_user(&args, (struct reflink_arguments *)arg,
-                                  sizeof(args)))
+               if (copy_from_user(&args, argp, sizeof(args)))
                        return -EFAULT;
-               old_path = (const char *)(unsigned long)args.old_path;
-               new_path = (const char *)(unsigned long)args.new_path;
+               old_path = (const char __user *)(unsigned long)args.old_path;
+               new_path = (const char __user *)(unsigned long)args.new_path;
                preserve = (args.preserve != 0);
 
                return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
        case OCFS2_IOC_INFO:
-               if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-                                  sizeof(struct ocfs2_info)))
+               if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 0);
@@ -960,22 +960,20 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
 
-               if (copy_from_user(&range, (struct fstrim_range *)arg,
-                   sizeof(range)))
+               if (copy_from_user(&range, argp, sizeof(range)))
                        return -EFAULT;
 
                ret = ocfs2_trim_fs(sb, &range);
                if (ret < 0)
                        return ret;
 
-               if (copy_to_user((struct fstrim_range *)arg, &range,
-                   sizeof(range)))
+               if (copy_to_user(argp, &range, sizeof(range)))
                        return -EFAULT;
 
                return 0;
        }
        case OCFS2_IOC_MOVE_EXT:
-               return ocfs2_ioctl_move_extents(filp, (void __user *)arg);
+               return ocfs2_ioctl_move_extents(filp, argp);
        default:
                return -ENOTTY;
        }
@@ -988,6 +986,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        struct reflink_arguments args;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ocfs2_info info;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case OCFS2_IOC32_GETFLAGS:
@@ -1006,16 +1005,14 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case FITRIM:
                break;
        case OCFS2_IOC_REFLINK:
-               if (copy_from_user(&args, (struct reflink_arguments *)arg,
-                                  sizeof(args)))
+               if (copy_from_user(&args, argp, sizeof(args)))
                        return -EFAULT;
                preserve = (args.preserve != 0);
 
                return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
                                           compat_ptr(args.new_path), preserve);
        case OCFS2_IOC_INFO:
-               if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-                                  sizeof(struct ocfs2_info)))
+               if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 1);
index b1e3fce72ea4767bf795c692e98faacebc42797c..6083432f667e3077eb466842ef0f00136d0b4b6f 100644 (file)
@@ -1082,8 +1082,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
        context->file = filp;
 
        if (argp) {
-               if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
-                                  sizeof(range))) {
+               if (copy_from_user(&range, argp, sizeof(range))) {
                        status = -EFAULT;
                        goto out;
                }
@@ -1138,8 +1137,7 @@ out:
         * length and new_offset even if failure happens somewhere.
         */
        if (argp) {
-               if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
-                               sizeof(range)))
+               if (copy_to_user(argp, &range, sizeof(range)))
                        status = -EFAULT;
        }
 
index a9856e3eaaf09753b4921d56ccdfb172db5cad7b..9f39c640cddf2076b951295dde5ef68217b26452 100644 (file)
@@ -1724,15 +1724,16 @@ static int ocfs2_symlink(struct inode *dir,
        fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
        inode->i_rdev = 0;
        newsize = l - 1;
+       inode->i_op = &ocfs2_symlink_inode_operations;
        if (l > ocfs2_fast_symlink_chars(sb)) {
                u32 offset = 0;
 
-               inode->i_op = &ocfs2_symlink_inode_operations;
                status = dquot_alloc_space_nodirty(inode,
                    ocfs2_clusters_to_bytes(osb->sb, 1));
                if (status)
                        goto bail;
                did_quota = 1;
+               inode->i_mapping->a_ops = &ocfs2_aops;
                status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
                                              new_fe_bh,
                                              handle, data_ac, NULL,
@@ -1750,7 +1751,7 @@ static int ocfs2_symlink(struct inode *dir,
                i_size_write(inode, newsize);
                inode->i_blocks = ocfs2_inode_sector_count(inode);
        } else {
-               inode->i_op = &ocfs2_fast_symlink_inode_operations;
+               inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
                memcpy((char *) fe->id2.i_symlink, symname, l);
                i_size_write(inode, newsize);
                inode->i_blocks = 0;
index 92fcd575775a0d1123e902b8499cd1b26cd52aab..0a86e302655f3384435ab4843fad50b6bf1a7cae 100644 (file)
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
                              msecs_to_jiffies(oinfo->dqi_syncms));
 
 out_err:
-       if (status)
-               mlog_errno(status);
        return status;
 out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
index 5d22872e2bb36012b711ac7720a90bee24717b15..f1fbb4b552ad3649238becdd9c21d4b138d5c8d7 100644 (file)
 #include "buffer_head_io.h"
 
 
-static char *ocfs2_fast_symlink_getlink(struct inode *inode,
-                                       struct buffer_head **bh)
+static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
 {
-       int status;
-       char *link = NULL;
+       struct inode *inode = page->mapping->host;
+       struct buffer_head *bh;
+       int status = ocfs2_read_inode_block(inode, &bh);
        struct ocfs2_dinode *fe;
+       const char *link;
+       void *kaddr;
+       size_t len;
 
-       status = ocfs2_read_inode_block(inode, bh);
        if (status < 0) {
                mlog_errno(status);
-               link = ERR_PTR(status);
-               goto bail;
+               return status;
        }
 
-       fe = (struct ocfs2_dinode *) (*bh)->b_data;
+       fe = (struct ocfs2_dinode *) bh->b_data;
        link = (char *) fe->id2.i_symlink;
-bail:
-
-       return link;
-}
-
-static int ocfs2_readlink(struct dentry *dentry,
-                         char __user *buffer,
-                         int buflen)
-{
-       int ret;
-       char *link;
-       struct buffer_head *bh = NULL;
-       struct inode *inode = dentry->d_inode;
-
-       link = ocfs2_fast_symlink_getlink(inode, &bh);
-       if (IS_ERR(link)) {
-               ret = PTR_ERR(link);
-               goto out;
-       }
-
-       /*
-        * Without vfsmount we can't update atime now,
-        * but we will update atime here ultimately.
-        */
-       ret = vfs_readlink(dentry, buffer, buflen, link);
-
+       /* will be less than a page size */
+       len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
+       kaddr = kmap_atomic(page);
+       memcpy(kaddr, link, len + 1);
+       kunmap_atomic(kaddr);
+       SetPageUptodate(page);
+       unlock_page(page);
        brelse(bh);
-out:
-       if (ret < 0)
-               mlog_errno(ret);
-       return ret;
+       return 0;
 }
 
-static void *ocfs2_fast_follow_link(struct dentry *dentry,
-                                   struct nameidata *nd)
-{
-       int status = 0;
-       int len;
-       char *target, *link = ERR_PTR(-ENOMEM);
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head *bh = NULL;
-
-       BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
-       target = ocfs2_fast_symlink_getlink(inode, &bh);
-       if (IS_ERR(target)) {
-               status = PTR_ERR(target);
-               mlog_errno(status);
-               goto bail;
-       }
-
-       /* Fast symlinks can't be large */
-       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
-       link = kzalloc(len + 1, GFP_NOFS);
-       if (!link) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
-       memcpy(link, target, len);
-
-bail:
-       nd_set_link(nd, status ? ERR_PTR(status) : link);
-       brelse(bh);
-
-       if (status)
-               mlog_errno(status);
-       return NULL;
-}
-
-static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
-{
-       char *link = nd_get_link(nd);
-       if (!IS_ERR(link))
-               kfree(link);
-}
+const struct address_space_operations ocfs2_fast_symlink_aops = {
+       .readpage               = ocfs2_fast_symlink_readpage,
+};
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
-       .readlink       = page_readlink,
+       .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = ocfs2_getattr,
@@ -159,15 +98,3 @@ const struct inode_operations ocfs2_symlink_inode_operations = {
        .removexattr    = generic_removexattr,
        .fiemap         = ocfs2_fiemap,
 };
-const struct inode_operations ocfs2_fast_symlink_inode_operations = {
-       .readlink       = ocfs2_readlink,
-       .follow_link    = ocfs2_fast_follow_link,
-       .put_link       = ocfs2_fast_put_link,
-       .getattr        = ocfs2_getattr,
-       .setattr        = ocfs2_setattr,
-       .setxattr       = generic_setxattr,
-       .getxattr       = generic_getxattr,
-       .listxattr      = ocfs2_listxattr,
-       .removexattr    = generic_removexattr,
-       .fiemap         = ocfs2_fiemap,
-};
index 65a6c9c6ad51d1018147cff4685743dd22bae935..71ee4245e9192274552ef9492412b36b068e72d6 100644 (file)
@@ -27,7 +27,7 @@
 #define OCFS2_SYMLINK_H
 
 extern const struct inode_operations ocfs2_symlink_inode_operations;
-extern const struct inode_operations ocfs2_fast_symlink_inode_operations;
+extern const struct address_space_operations ocfs2_fast_symlink_aops;
 
 /*
  * Test whether an inode is a fast symlink.
index d54301219d04f1c8fed18d6de15ed590a593e2ab..1540632d8387fe98a51d0193201346acb18ae70e 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
        struct file *file;
        struct inode *inode;
-       int error;
+       int error, fput_needed;
 
        error = -EBADF;
-       file = fget(fd);
+       file = fget_raw_light(fd, &fput_needed);
        if (!file)
                goto out;
 
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!error)
                set_fs_pwd(current->fs, &file->f_path);
 out_putf:
-       fput(file);
+       fput_light(file, fput_needed);
 out:
        return error;
 }
@@ -654,10 +654,23 @@ static inline int __get_file_write_access(struct inode *inode,
        return error;
 }
 
-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-                                       struct file *f,
-                                       int (*open)(struct inode *, struct file *),
-                                       const struct cred *cred)
+int open_check_o_direct(struct file *f)
+{
+       /* NB: we're sure to have correct a_ops only after f_op->open */
+       if (f->f_flags & O_DIRECT) {
+               if (!f->f_mapping->a_ops ||
+                   ((!f->f_mapping->a_ops->direct_IO) &&
+                   (!f->f_mapping->a_ops->get_xip_mem))) {
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+                                  struct file *f,
+                                  int (*open)(struct inode *, struct file *),
+                                  const struct cred *cred)
 {
        static const struct file_operations empty_fops = {};
        struct inode *inode;
@@ -713,16 +726,6 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 
        file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
 
-       /* NB: we're sure to have correct a_ops only after f_op->open */
-       if (f->f_flags & O_DIRECT) {
-               if (!f->f_mapping->a_ops ||
-                   ((!f->f_mapping->a_ops->direct_IO) &&
-                   (!f->f_mapping->a_ops->get_xip_mem))) {
-                       fput(f);
-                       f = ERR_PTR(-EINVAL);
-               }
-       }
-
        return f;
 
 cleanup_all:
@@ -744,12 +747,29 @@ cleanup_all:
        f->f_path.dentry = NULL;
        f->f_path.mnt = NULL;
 cleanup_file:
-       put_filp(f);
        dput(dentry);
        mntput(mnt);
        return ERR_PTR(error);
 }
 
+static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+                               struct file *f,
+                               int (*open)(struct inode *, struct file *),
+                               const struct cred *cred)
+{
+       struct file *res = do_dentry_open(dentry, mnt, f, open, cred);
+       if (!IS_ERR(res)) {
+               int error = open_check_o_direct(f);
+               if (error) {
+                       fput(res);
+                       res = ERR_PTR(error);
+               }
+       } else {
+               put_filp(f);
+       }
+       return res;
+}
+
 /**
  * lookup_instantiate_filp - instantiates the open intent filp
  * @nd: pointer to nameidata
@@ -804,13 +824,31 @@ struct file *nameidata_to_filp(struct nameidata *nd)
 
        /* Pick up the filp from the open intent */
        filp = nd->intent.open.file;
-       nd->intent.open.file = NULL;
 
        /* Has the filesystem initialised the file for us? */
-       if (filp->f_path.dentry == NULL) {
+       if (filp->f_path.dentry != NULL) {
+               nd->intent.open.file = NULL;
+       } else {
+               struct file *res;
+
                path_get(&nd->path);
-               filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
-                                    NULL, cred);
+               res = do_dentry_open(nd->path.dentry, nd->path.mnt,
+                                    filp, NULL, cred);
+               if (!IS_ERR(res)) {
+                       int error;
+
+                       nd->intent.open.file = NULL;
+                       BUG_ON(res != filp);
+
+                       error = open_check_o_direct(filp);
+                       if (error) {
+                               fput(filp);
+                               filp = ERR_PTR(error);
+                       }
+               } else {
+                       /* Allow nd->intent.open.file to be recycled */
+                       filp = res;
+               }
        }
        return filp;
 }
index fec5e4ad071a36bb8783bdcc8c40c07c614340a5..49c1065256fd10d9d5fdca3cf449b1e56bd58a0a 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -654,8 +654,11 @@ out:
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
-       if (ret > 0)
-               file_update_time(filp);
+       if (ret > 0) {
+               int err = file_update_time(filp);
+               if (err)
+                       ret = err;
+       }
        return ret;
 }
 
@@ -693,7 +696,7 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
                        return put_user(count, (int __user *)arg);
                default:
-                       return -EINVAL;
+                       return -ENOIOCTLCMD;
        }
 }
 
index ab5fa9e1a79ac8277ac1cb51db6a92e55ba2c935..bed378db075813350362c39f423d1b4335240bfa 100644 (file)
@@ -257,12 +257,12 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
                prev_src_mnt  = child;
        }
 out:
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        while (!list_empty(&tmp_list)) {
                child = list_first_entry(&tmp_list, struct mount, mnt_hash);
                umount_tree(child, 0, &umount_list);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        release_mounts(&umount_list);
        return ret;
 }
index dc4c5a7b9eceb767c0d7305ce5e1b609cdcdaef3..c1c207c36caefeb1fcbc8d782504cfcbf1b616d8 100644 (file)
@@ -370,7 +370,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
 {
        unsigned long vsize, eip, esp, wchan = ~0UL;
-       long priority, nice;
+       int priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
        char state;
@@ -492,7 +492,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', start_time);
        seq_put_decimal_ull(m, ' ', vsize);
-       seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
+       seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
        seq_put_decimal_ull(m, ' ', rsslim);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
@@ -517,9 +517,23 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
+
+       if (mm && permitted) {
+               seq_put_decimal_ull(m, ' ', mm->start_data);
+               seq_put_decimal_ull(m, ' ', mm->end_data);
+               seq_put_decimal_ull(m, ' ', mm->start_brk);
+               seq_put_decimal_ull(m, ' ', mm->arg_start);
+               seq_put_decimal_ull(m, ' ', mm->arg_end);
+               seq_put_decimal_ull(m, ' ', mm->env_start);
+               seq_put_decimal_ull(m, ' ', mm->env_end);
+       } else
+               seq_printf(m, " 0 0 0 0 0 0 0");
+
+       if (permitted)
+               seq_put_decimal_ll(m, ' ', task->exit_code);
+       else
+               seq_put_decimal_ll(m, ' ', 0);
+
        seq_putc(m, '\n');
        if (mm)
                mmput(mm);
@@ -565,3 +579,126 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 
        return 0;
 }
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct pid *
+get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+{
+       struct task_struct *start, *task;
+       struct pid *pid = NULL;
+
+       read_lock(&tasklist_lock);
+
+       start = pid_task(proc_pid(inode), PIDTYPE_PID);
+       if (!start)
+               goto out;
+
+       /*
+        * Lets try to continue searching first, this gives
+        * us significant speedup on children-rich processes.
+        */
+       if (pid_prev) {
+               task = pid_task(pid_prev, PIDTYPE_PID);
+               if (task && task->real_parent == start &&
+                   !(list_empty(&task->sibling))) {
+                       if (list_is_last(&task->sibling, &start->children))
+                               goto out;
+                       task = list_first_entry(&task->sibling,
+                                               struct task_struct, sibling);
+                       pid = get_pid(task_pid(task));
+                       goto out;
+               }
+       }
+
+       /*
+        * Slow search case.
+        *
+        * We might miss some children here if children
+        * are exited while we were not holding the lock,
+        * but it was never promised to be accurate that
+        * much.
+        *
+        * "Just suppose that the parent sleeps, but N children
+        *  exit after we printed their tids. Now the slow paths
+        *  skips N extra children, we miss N tasks." (c)
+        *
+        * So one need to stop or freeze the leader and all
+        * its children to get a precise result.
+        */
+       list_for_each_entry(task, &start->children, sibling) {
+               if (pos-- == 0) {
+                       pid = get_pid(task_pid(task));
+                       break;
+               }
+       }
+
+out:
+       read_unlock(&tasklist_lock);
+       return pid;
+}
+
+static int children_seq_show(struct seq_file *seq, void *v)
+{
+       struct inode *inode = seq->private;
+       pid_t pid;
+
+       pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
+       return seq_printf(seq, "%d ", pid);
+}
+
+static void *children_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return get_children_pid(seq->private, NULL, *pos);
+}
+
+static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct pid *pid;
+
+       pid = get_children_pid(seq->private, v, *pos + 1);
+       put_pid(v);
+
+       ++*pos;
+       return pid;
+}
+
+static void children_seq_stop(struct seq_file *seq, void *v)
+{
+       put_pid(v);
+}
+
+static const struct seq_operations children_seq_ops = {
+       .start  = children_seq_start,
+       .next   = children_seq_next,
+       .stop   = children_seq_stop,
+       .show   = children_seq_show,
+};
+
+static int children_seq_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &children_seq_ops);
+       if (ret)
+               return ret;
+
+       m = file->private_data;
+       m->private = inode;
+
+       return ret;
+}
+
+int children_seq_release(struct inode *inode, struct file *file)
+{
+       seq_release(inode, file);
+       return 0;
+}
+
+const struct file_operations proc_tid_children_operations = {
+       .open    = children_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = children_seq_release,
+};
+#endif /* CONFIG_CHECKPOINT_RESTORE */
index d7d711876b6a00e8bf3ba659db8f59d69d6862dc..437195f204e14e908bdda87ace0e8b0ce81efb21 100644 (file)
@@ -199,11 +199,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
-struct mm_struct *mm_for_maps(struct task_struct *task)
-{
-       return mm_access(task, PTRACE_MODE_READ);
-}
-
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 {
        int res = 0;
@@ -243,7 +238,7 @@ out:
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
-       struct mm_struct *mm = mm_for_maps(task);
+       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
        int res = PTR_ERR(mm);
        if (mm && !IS_ERR(mm)) {
                unsigned int nwords = 0;
@@ -679,7 +674,7 @@ static const struct file_operations proc_single_file_operations = {
        .release        = single_release,
 };
 
-static int mem_open(struct inode* inode, struct file* file)
+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 {
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        struct mm_struct *mm;
@@ -687,7 +682,7 @@ static int mem_open(struct inode* inode, struct file* file)
        if (!task)
                return -ESRCH;
 
-       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       mm = mm_access(task, mode);
        put_task_struct(task);
 
        if (IS_ERR(mm))
@@ -707,6 +702,11 @@ static int mem_open(struct inode* inode, struct file* file)
        return 0;
 }
 
+static int mem_open(struct inode *inode, struct file *file)
+{
+       return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+}
+
 static ssize_t mem_rw(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos, int write)
 {
@@ -803,30 +803,29 @@ static const struct file_operations proc_mem_operations = {
        .release        = mem_release,
 };
 
+static int environ_open(struct inode *inode, struct file *file)
+{
+       return __mem_open(inode, file, PTRACE_MODE_READ);
+}
+
 static ssize_t environ_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
        char *page;
        unsigned long src = *ppos;
-       int ret = -ESRCH;
-       struct mm_struct *mm;
+       int ret = 0;
+       struct mm_struct *mm = file->private_data;
 
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       ret = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out;
-
-
-       mm = mm_for_maps(task);
-       ret = PTR_ERR(mm);
-       if (!mm || IS_ERR(mm))
-               goto out_free;
+               return -ENOMEM;
 
        ret = 0;
+       if (!atomic_inc_not_zero(&mm->mm_users))
+               goto free;
        while (count > 0) {
                int this_len, retval, max_len;
 
@@ -838,7 +837,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
                this_len = (this_len > max_len) ? max_len : this_len;
 
-               retval = access_process_vm(task, (mm->env_start + src),
+               retval = access_remote_vm(mm, (mm->env_start + src),
                        page, this_len, 0);
 
                if (retval <= 0) {
@@ -857,19 +856,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                count -= retval;
        }
        *ppos = src;
-
        mmput(mm);
-out_free:
+
+free:
        free_page((unsigned long) page);
-out:
-       put_task_struct(task);
-out_no_task:
        return ret;
 }
 
 static const struct file_operations proc_environ_operations = {
+       .open           = environ_open,
        .read           = environ_read,
        .llseek         = generic_file_llseek,
+       .release        = mem_release,
 };
 
 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1805,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                        rcu_read_lock();
                        file = fcheck_files(files, fd);
                        if (file) {
-                               unsigned i_mode, f_mode = file->f_mode;
+                               unsigned f_mode = file->f_mode;
 
                                rcu_read_unlock();
                                put_files_struct(files);
@@ -1821,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                                        inode->i_gid = GLOBAL_ROOT_GID;
                                }
 
-                               i_mode = S_IFLNK;
-                               if (f_mode & FMODE_READ)
-                                       i_mode |= S_IRUSR | S_IXUSR;
-                               if (f_mode & FMODE_WRITE)
-                                       i_mode |= S_IWUSR | S_IXUSR;
-                               inode->i_mode = i_mode;
+                               if (S_ISLNK(inode->i_mode)) {
+                                       unsigned i_mode = S_IFLNK;
+                                       if (f_mode & FMODE_READ)
+                                               i_mode |= S_IRUSR | S_IXUSR;
+                                       if (f_mode & FMODE_WRITE)
+                                               i_mode |= S_IWUSR | S_IXUSR;
+                                       inode->i_mode = i_mode;
+                               }
 
                                security_task_to_inode(task, inode);
                                put_task_struct(task);
@@ -1850,7 +1850,7 @@ static const struct dentry_operations tid_fd_dentry_operations =
 static struct dentry *proc_fd_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-       unsigned fd = *(const unsigned *)ptr;
+       unsigned fd = (unsigned long)ptr;
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-ENOENT);
@@ -1861,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
        ei = PROC_I(inode);
        ei->fd = fd;
 
+       inode->i_mode = S_IFLNK;
        inode->i_op = &proc_pid_link_inode_operations;
        inode->i_size = 64;
        ei->op.proc_get_link = proc_fd_link;
@@ -1887,7 +1888,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
        if (fd == ~0U)
                goto out;
 
-       result = instantiate(dir, dentry, task, &fd);
+       result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
 out:
        put_task_struct(task);
 out_no_task:
@@ -1930,21 +1931,22 @@ static int proc_readfd_common(struct file * filp, void * dirent,
                             fd++, filp->f_pos++) {
                                char name[PROC_NUMBUF];
                                int len;
+                               int rv;
 
                                if (!fcheck_files(files, fd))
                                        continue;
                                rcu_read_unlock();
 
                                len = snprintf(name, sizeof(name), "%d", fd);
-                               if (proc_fill_cache(filp, dirent, filldir,
-                                                   name, len, instantiate,
-                                                   p, &fd) < 0) {
-                                       rcu_read_lock();
-                                       break;
-                               }
+                               rv = proc_fill_cache(filp, dirent, filldir,
+                                                    name, len, instantiate, p,
+                                                    (void *)(unsigned long)fd);
+                               if (rv < 0)
+                                       goto out_fd_loop;
                                rcu_read_lock();
                        }
                        rcu_read_unlock();
+out_fd_loop:
                        put_files_struct(files);
        }
 out:
@@ -2024,11 +2026,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
        if (!task)
                goto out_notask;
 
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
-               goto out;
-
-       mm = get_task_mm(task);
-       if (!mm)
+       mm = mm_access(task, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(mm))
                goto out;
 
        if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2357,7 +2356,7 @@ static const struct inode_operations proc_fd_inode_operations = {
 static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-       unsigned fd = *(unsigned *)ptr;
+       unsigned fd = (unsigned long)ptr;
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-ENOENT);
@@ -3404,6 +3403,9 @@ static const struct pid_entry tid_base_stuff[] = {
        ONE("stat",      S_IRUGO, proc_tid_stat),
        ONE("statm",     S_IRUGO, proc_pid_statm),
        REG("maps",      S_IRUGO, proc_tid_maps_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+       REG("children",  S_IRUGO, proc_tid_children_operations),
+#endif
 #ifdef CONFIG_NUMA
        REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
 #endif
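
The fs/proc/base.c hunks above move the ptrace permission check for /proc/<pid>/environ from read time to open time: environ_open() pins the target mm with PTRACE_MODE_READ, and environ_read() then walks it via access_remote_vm(), returning 0 once the mm is gone. A minimal userspace sketch of the consumer side (the PID argument and buffer size are arbitrary, not part of the patch):

/*
 * Illustrative only: dump another process's environment via
 * /proc/<pid>/environ. With the change above, the permission check
 * happens at open(2) time; after the target exits, read(2) returns 0
 * instead of an error.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], buf[4096];
	size_t n, i;
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/environ", argv[1]);
	f = fopen(path, "r");	/* fails here if the caller lacks ptrace rights */
	if (!f) {
		perror(path);
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		for (i = 0; i < n; i++)
			putchar(buf[i] ? buf[i] : '\n');	/* entries are NUL-separated */
	fclose(f);
	return 0;
}
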
index 5f79bb8b4c60211620c8af8212e9ec97c0978dc1..eca4aca5b6e227c11bb13c100deac3861b747a44 100644 (file)
@@ -31,8 +31,6 @@ struct vmalloc_info {
        unsigned long   largest_chunk;
 };
 
-extern struct mm_struct *mm_for_maps(struct task_struct *);
-
 #ifdef CONFIG_MMU
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
 extern void get_vmalloc_info(struct vmalloc_info *vmi);
@@ -56,6 +54,7 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                                struct pid *pid, struct task_struct *task);
 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
 
+extern const struct file_operations proc_tid_children_operations;
 extern const struct file_operations proc_pid_maps_operations;
 extern const struct file_operations proc_tid_maps_operations;
 extern const struct file_operations proc_pid_numa_maps_operations;
index 7faaf2acc57032c060896234a2581048887b0b1d..4540b8f76f163fbaaef250b6facf161424a90869 100644 (file)
@@ -125,7 +125,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = mm_for_maps(priv->task);
+       mm = mm_access(priv->task, PTRACE_MODE_READ);
        if (!mm || IS_ERR(mm))
                return mm;
        down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@ struct mem_size_stats {
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
+       unsigned long nonlinear;
        u64 pss;
 };
 
@@ -402,24 +403,33 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 {
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
-       struct page *page;
+       pgoff_t pgoff = linear_page_index(vma, addr);
+       struct page *page = NULL;
        int mapcount;
 
-       if (is_swap_pte(ptent)) {
-               mss->swap += ptent_size;
-               return;
+       if (pte_present(ptent)) {
+               page = vm_normal_page(vma, addr, ptent);
+       } else if (is_swap_pte(ptent)) {
+               swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+               if (!non_swap_entry(swpent))
+                       mss->swap += ptent_size;
+               else if (is_migration_entry(swpent))
+                       page = migration_entry_to_page(swpent);
+       } else if (pte_file(ptent)) {
+               if (pte_to_pgoff(ptent) != pgoff)
+                       mss->nonlinear += ptent_size;
        }
 
-       if (!pte_present(ptent))
-               return;
-
-       page = vm_normal_page(vma, addr, ptent);
        if (!page)
                return;
 
        if (PageAnon(page))
                mss->anonymous += ptent_size;
 
+       if (page->index != pgoff)
+               mss->nonlinear += ptent_size;
+
        mss->resident += ptent_size;
        /* Accumulate the size in pages that have been accessed. */
        if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
+       if (vma->vm_flags & VM_NONLINEAR)
+               seq_printf(m, "Nonlinear:      %8lu kB\n",
+                               mss.nonlinear >> 10);
+
        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task->mm))
                        ? vma->vm_start : 0;
@@ -700,6 +714,7 @@ struct pagemapread {
 
 #define PM_PRESENT          PM_STATUS(4LL)
 #define PM_SWAP             PM_STATUS(2LL)
+#define PM_FILE             PM_STATUS(1LL)
 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 #define PM_END_OF_BUFFER    1
 
@@ -733,22 +748,33 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
        return err;
 }
 
-static u64 swap_pte_to_pagemap_entry(pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme,
+               struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-       swp_entry_t e = pte_to_swp_entry(pte);
-       return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
-}
-
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
-{
-       if (is_swap_pte(pte))
-               *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
-                               | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
-       else if (pte_present(pte))
-               *pme = make_pme(PM_PFRAME(pte_pfn(pte))
-                               | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
-       else
+       u64 frame, flags;
+       struct page *page = NULL;
+
+       if (pte_present(pte)) {
+               frame = pte_pfn(pte);
+               flags = PM_PRESENT;
+               page = vm_normal_page(vma, addr, pte);
+       } else if (is_swap_pte(pte)) {
+               swp_entry_t entry = pte_to_swp_entry(pte);
+
+               frame = swp_type(entry) |
+                       (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+               flags = PM_SWAP;
+               if (is_migration_entry(entry))
+                       page = migration_entry_to_page(entry);
+       } else {
                *pme = make_pme(PM_NOT_PRESENT);
+               return;
+       }
+
+       if (page && !PageAnon(page))
+               flags |= PM_FILE;
+
+       *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -815,7 +841,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                if (vma && (vma->vm_start <= addr) &&
                    !is_vm_hugetlb_page(vma)) {
                        pte = pte_offset_map(pmd, addr);
-                       pte_to_pagemap_entry(&pme, *pte);
+                       pte_to_pagemap_entry(&pme, vma, addr, *pte);
                        /* unmap before userspace copy */
                        pte_unmap(pte);
                }
@@ -869,11 +895,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  * For each page in the address space, this file contains one 64-bit entry
  * consisting of the following:
  *
- * Bits 0-55  page frame number (PFN) if present
+ * Bits 0-54  page frame number (PFN) if present
  * Bits 0-4   swap type if swapped
- * Bits 5-55  swap offset if swapped
+ * Bits 5-54  swap offset if swapped
  * Bits 55-60 page shift (page size = 1<<page shift)
- * Bit  61    reserved for future use
+ * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
  *
@@ -919,7 +945,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!pm.buffer)
                goto out_task;
 
-       mm = mm_for_maps(task);
+       mm = mm_access(task, PTRACE_MODE_READ);
        ret = PTR_ERR(mm);
        if (!mm || IS_ERR(mm))
                goto out_free;
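
The fs/proc/task_mmu.c hunks above change two user-visible things: smaps gains a "Nonlinear:" line for VM_NONLINEAR mappings, and pagemap narrows the PFN field to bits 0-54 while defining bit 61 as "file-page or shared-anon" (PM_FILE). A userspace sketch that decodes one pagemap entry according to the layout documented in the comment above (the probed address is arbitrary):

/*
 * Illustrative decode of a /proc/self/pagemap entry: bits 0-54 PFN (or
 * swap type/offset), bits 55-60 page shift, bit 61 file-page/shared-anon,
 * bit 62 swapped, bit 63 present.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	uintptr_t vaddr = (uintptr_t)&psize;	/* any mapped address will do */
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0 || pread(fd, &entry, sizeof(entry),
			    (off_t)(vaddr / psize) * sizeof(entry)) != sizeof(entry)) {
		perror("pagemap");
		return 1;
	}
	printf("present=%d swapped=%d file-or-shared-anon=%d pfn=0x%llx\n",
	       (int)((entry >> 63) & 1),
	       (int)((entry >> 62) & 1),
	       (int)((entry >> 61) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}
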
index 74fe164d1b233924f6a4d3e8ddd24624660dd63a..1ccfa537f5f5dfaac3c1351dabe391f0ca3e6d22 100644 (file)
@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = mm_for_maps(priv->task);
+       mm = mm_access(priv->task, PTRACE_MODE_READ);
        if (!mm || IS_ERR(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
index 12412852d88a94d574bacebb5e64200f202db852..5e289a7cbad17d8547458f1d8b2526f2e85e5cb9 100644 (file)
@@ -23,12 +23,12 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &p->ns->poll, wait);
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        if (p->m.poll_event != ns->event) {
                p->m.poll_event = ns->event;
                res |= POLLERR | POLLPRI;
        }
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
 
        return res;
 }
index aeb19e68e0860a436998223cf854ff5502730e53..11a2aa2a56c4c0273a08d107d9242985ccc0369c 100644 (file)
@@ -258,7 +258,7 @@ fail:
        return rc;
 }
 
-int pstore_fill_super(struct super_block *sb, void *data, int silent)
+static int pstore_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *inode;
 
index 82c585f715e341c36666fdc378f75408e3ea3d14..03ce7a9b81cc99765234e9b859f1bfea6a5d0208 100644 (file)
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
  * as we can from the end of the buffer.
  */
 static void pstore_dump(struct kmsg_dumper *dumper,
-           enum kmsg_dump_reason reason,
-           const char *s1, unsigned long l1,
-           const char *s2, unsigned long l2)
+                       enum kmsg_dump_reason reason)
 {
-       unsigned long   s1_start, s2_start;
-       unsigned long   l1_cpy, l2_cpy;
-       unsigned long   size, total = 0;
-       char            *dst;
+       unsigned long   total = 0;
        const char      *why;
        u64             id;
-       int             hsize, ret;
        unsigned int    part = 1;
        unsigned long   flags = 0;
        int             is_locked = 0;
+       int             ret;
 
        why = get_reason_str(reason);
 
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
                spin_lock_irqsave(&psinfo->buf_lock, flags);
        oopscount++;
        while (total < kmsg_bytes) {
+               char *dst;
+               unsigned long size;
+               int hsize;
+               size_t len;
+
                dst = psinfo->buf;
                hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
                size = psinfo->bufsize - hsize;
                dst += hsize;
 
-               l2_cpy = min(l2, size);
-               l1_cpy = min(l1, size - l2_cpy);
-
-               if (l1_cpy + l2_cpy == 0)
+               if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
                        break;
 
-               s2_start = l2 - l2_cpy;
-               s1_start = l1 - l1_cpy;
-
-               memcpy(dst, s1 + s1_start, l1_cpy);
-               memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
-
                ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-                                  hsize + l1_cpy + l2_cpy, psinfo);
+                                   hsize + len, psinfo);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;
-               l1 -= l1_cpy;
-               l2 -= l2_cpy;
-               total += l1_cpy + l2_cpy;
+
+               total += hsize + len;
                part++;
        }
        if (in_nmi()) {
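
pstore_dump() above is converted from the old two-segment dumper callback (s1/l1, s2/l2) to the single-argument callback that pulls records itself via kmsg_dump_get_buffer(). A minimal sketch of another dumper on the same interface; the "example" names and the 4 KiB buffer are invented for illustration:

/*
 * Sketch of a kmsg dumper using the new callback signature shown above.
 * Each kmsg_dump_get_buffer() call fills the buffer with as many of the
 * newest remaining records as fit and returns false once the log is
 * exhausted, which is how pstore builds its numbered "parts".
 */
#include <linux/module.h>
#include <linux/kmsg_dump.h>

static char example_buf[4096];

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t len;

	while (kmsg_dump_get_buffer(dumper, true, example_buf,
				    sizeof(example_buf), &len)) {
		/* hand (example_buf, len) to the backing store here */
	}
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
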
index 9123cce28c1e8d6f511738cda7435861f1261ef7..453030f9c5bc2a68523cafc26c6d331d86855949 100644 (file)
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
        time->tv_sec = 0;
        time->tv_nsec = 0;
 
+       /* Update old/shadowed buffer. */
+       persistent_ram_save_old(prz);
        size = persistent_ram_old_size(prz);
        *buf = kmalloc(size, GFP_KERNEL);
        if (*buf == NULL)
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id,
                return -EINVAL;
 
        persistent_ram_free_old(cxt->przs[id]);
+       persistent_ram_zap(cxt->przs[id]);
 
        return 0;
 }
index 31f8d184f3a0a659cd53a5002c2c013bb13e76a0..c5fbdbbf81ac0d6fda33e9b848a9fafa5b2b21f2 100644 (file)
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
        persistent_ram_update_ecc(prz, start, count);
 }
 
-static void __init
-persistent_ram_save_old(struct persistent_ram_zone *prz)
+void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
        struct persistent_ram_buffer *buffer = prz->buffer;
        size_t size = buffer_size(prz);
        size_t start = buffer_start(prz);
-       char *dest;
 
-       persistent_ram_ecc_old(prz);
+       if (!size)
+               return;
 
-       dest = kmalloc(size, GFP_KERNEL);
-       if (dest == NULL) {
+       if (!prz->old_log) {
+               persistent_ram_ecc_old(prz);
+               prz->old_log = kmalloc(size, GFP_KERNEL);
+       }
+       if (!prz->old_log) {
                pr_err("persistent_ram: failed to allocate buffer\n");
                return;
        }
 
-       prz->old_log = dest;
        prz->old_log_size = size;
        memcpy(prz->old_log, &buffer->data[start], size - start);
        memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz)
        prz->old_log_size = 0;
 }
 
+void persistent_ram_zap(struct persistent_ram_zone *prz)
+{
+       atomic_set(&prz->buffer->start, 0);
+       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_update_header_ecc(prz);
+}
+
 static void *persistent_ram_vmap(phys_addr_t start, size_t size)
 {
        struct page **pages;
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
                                " size %zu, start %zu\n",
                               buffer_size(prz), buffer_start(prz));
                        persistent_ram_save_old(prz);
+                       return 0;
                }
        } else {
                pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
        }
 
        prz->buffer->sig = PERSISTENT_RAM_SIG;
-       atomic_set(&prz->buffer->start, 0);
-       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_zap(prz);
 
        return 0;
 }
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                goto err;
 
        persistent_ram_post_init(prz, ecc);
-       persistent_ram_update_header_ecc(prz);
 
        return prz;
 err:
index fbb0b478a346fbc77c854696c5e0e508760125ad..d5378d028589843e1cfef1efd5dba0b8cc4072cb 100644 (file)
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);
+               SetPageUptodate(page);
 
                unlock_page(page);
                put_page(page);
index ffc99d22e0a3656711f14ac7e094cc954d1d90bd..c20614f86c01ed88ed36a65e9dfafdabfd3ba4d3 100644 (file)
@@ -633,8 +633,7 @@ ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              unsigned long nr_segs, unsigned long fast_segs,
                              struct iovec *fast_pointer,
-                             struct iovec **ret_pointer,
-                             int check_access)
+                             struct iovec **ret_pointer)
 {
        unsigned long seg;
        ssize_t ret;
@@ -690,7 +689,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                        ret = -EINVAL;
                        goto out;
                }
-               if (check_access
+               if (type >= 0
                    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
                        ret = -EFAULT;
                        goto out;
@@ -723,7 +722,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
        }
 
        ret = rw_copy_check_uvector(type, uvector, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+                                   ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret <= 0)
                goto out;
 
index cc0a8227cddf688f70e289c427666057ce98e613..39e3370d79cf1e6399843137e2d64165baf49a03 100644 (file)
@@ -108,11 +108,11 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        int error;
        struct file * file;
        struct readdir_callback buf;
+       int fput_needed;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
@@ -121,8 +121,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        if (buf.result)
                error = buf.result;
 
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -195,16 +194,15 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        struct file * file;
        struct linux_dirent __user * lastdirent;
        struct getdents_callback buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -221,8 +219,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -278,16 +275,15 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        struct file * file;
        struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -305,7 +301,6 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
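
The readdir/getdents conversions above show a pattern repeated throughout this series (signalfd, statfs, sync, utimes and xattr below): fget()/fput() become fget_light()/fput_light(), which skip the atomic reference bump when the file table is not shared. A condensed sketch of that idiom; example_op_fd() is a made-up name, not kernel code:

/*
 * fget_light()/fput_light() idiom: fput_needed records whether a real
 * reference was taken, so fput_light() only calls fput() when needed.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/errno.h>

static long example_op_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file;
	long error = -EBADF;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return error;

	error = 0;	/* ... operate on file->f_path, file->f_op, ... */

	fput_light(file, fput_needed);
	return error;
}
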
index 59d06871a850dcebc966581f43c656bedba440f0..a6d4268fb6c11798db5f8339bd14ab297cd9b21f 100644 (file)
@@ -1592,13 +1592,12 @@ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                (fh_type == 6) ? fid->raw[5] : 0);
 }
 
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-                      int need_parent)
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+                      struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
        int maxlen = *lenp;
 
-       if (need_parent && (maxlen < 5)) {
+       if (parent && (maxlen < 5)) {
                *lenp = 5;
                return 255;
        } else if (maxlen < 3) {
@@ -1610,20 +1609,15 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
        data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
        data[2] = inode->i_generation;
        *lenp = 3;
-       /* no room for directory info? return what we've stored so far */
-       if (maxlen < 5 || !need_parent)
-               return 3;
-
-       spin_lock(&dentry->d_lock);
-       inode = dentry->d_parent->d_inode;
-       data[3] = inode->i_ino;
-       data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
-       *lenp = 5;
-       if (maxlen >= 6) {
-               data[5] = inode->i_generation;
-               *lenp = 6;
-       }
-       spin_unlock(&dentry->d_lock);
+       if (parent) {
+               data[3] = parent->i_ino;
+               data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
+               *lenp = 5;
+               if (maxlen >= 6) {
+                       data[5] = parent->i_generation;
+                       *lenp = 6;
+               }
+       }
        return *lenp;
 }
 
index b1a08573fe14277961aa3039ce0fb587d4f889cc..afcadcc03e8ac87c7f25f3e2393b3c108daaf91d 100644 (file)
@@ -1923,6 +1923,8 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
         * the workqueue job (flush_async_commit) needs this lock
         */
        reiserfs_write_unlock(sb);
+
+       cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
        flush_workqueue(commit_wq);
 
        if (!reiserfs_mounted_fs_count) {
@@ -3231,8 +3233,6 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                               th->t_trans_id, journal->j_trans_id);
        }
 
-       sb->s_dirt = 1;
-
        prepared = test_clear_buffer_journal_prepared(bh);
        clear_buffer_journal_restore_dirty(bh);
        /* already in this transaction, we are done */
@@ -3316,6 +3316,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                journal->j_first = cn;
                journal->j_last = cn;
        }
+       reiserfs_schedule_old_flush(sb);
        return 0;
 }
 
@@ -3492,7 +3493,7 @@ static void flush_async_commits(struct work_struct *work)
 ** flushes any old transactions to disk
 ** ends the current transaction if it is too old
 */
-int reiserfs_flush_old_commits(struct super_block *sb)
+void reiserfs_flush_old_commits(struct super_block *sb)
 {
        time_t now;
        struct reiserfs_transaction_handle th;
@@ -3502,9 +3503,8 @@ int reiserfs_flush_old_commits(struct super_block *sb)
        /* safety check so we don't flush while we are replaying the log during
         * mount
         */
-       if (list_empty(&journal->j_journal_list)) {
-               return 0;
-       }
+       if (list_empty(&journal->j_journal_list))
+               return;
 
        /* check the current transaction.  If there are no writers, and it is
         * too old, finish it, and force the commit blocks to disk
@@ -3526,7 +3526,6 @@ int reiserfs_flush_old_commits(struct super_block *sb)
                        do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
                }
        }
-       return sb->s_dirt;
 }
 
 /*
@@ -3955,7 +3954,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
         ** it tells us if we should continue with the journal_end, or just return
         */
        if (!check_journal_end(th, sb, nblocks, flags)) {
-               sb->s_dirt = 1;
+               reiserfs_schedule_old_flush(sb);
                wake_queued_writers(sb);
                reiserfs_async_progress_wait(sb);
                goto out;
index a59d27126338e43939f8fc04942acf13c956f1e4..33215f57ea06ce3026ef2d488832a337a361bd71 100644 (file)
@@ -480,6 +480,11 @@ struct reiserfs_sb_info {
        struct dentry *priv_root;       /* root of /.reiserfs_priv */
        struct dentry *xattr_root;      /* root of /.reiserfs_priv/xattrs */
        int j_errno;
+
+       int work_queued;              /* non-zero when delayed work is queued */
+       struct delayed_work old_work; /* old transactions flush delayed work */
+       spinlock_t old_work_lock;     /* protects old_work and work_queued */
+
 #ifdef CONFIG_QUOTA
        char *s_qf_names[MAXQUOTAS];
        int s_jquota_fmt;
@@ -2452,7 +2457,7 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
 int reiserfs_commit_page(struct inode *inode, struct page *page,
                         unsigned from, unsigned to);
-int reiserfs_flush_old_commits(struct super_block *);
+void reiserfs_flush_old_commits(struct super_block *);
 int reiserfs_commit_for_inode(struct inode *);
 int reiserfs_inode_needs_commit(struct inode *);
 void reiserfs_update_inode_transaction(struct inode *);
@@ -2487,6 +2492,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
 int reiserfs_allocate_list_bitmaps(struct super_block *s,
                                   struct reiserfs_list_bitmap *, unsigned int);
 
+void reiserfs_schedule_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th,
                   struct inode *inode, int truncate);
 int remove_save_link(struct inode *inode, int truncate);
@@ -2611,8 +2617,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                     int fh_len, int fh_type);
 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                                     int fh_len, int fh_type);
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-                      int connectable);
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+                      struct inode *parent);
 
 int reiserfs_truncate_file(struct inode *, int update_timestamps);
 void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
index 9a17f63c3fd7f3618a44bdf946476e4957dcd50d..3ce02cff5e90bd1c26374e12e15a6f56ea8c8803 100644 (file)
@@ -200,7 +200,6 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
                                          (bmap_nr_new - bmap_nr)));
        PUT_SB_BLOCK_COUNT(s, block_count_new);
        PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
-       s->s_dirt = 1;
 
        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
 
index c07b7d709447de1670e9caf8a7fe9bbca6593138..651ce767b55d8241e283b3001d7fc9e6d803b317 100644 (file)
@@ -72,20 +72,58 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
        if (!journal_begin(&th, s, 1))
                if (!journal_end_sync(&th, s, 1))
                        reiserfs_flush_old_commits(s);
-       s->s_dirt = 0;  /* Even if it's not true.
-                        * We'll loop forever in sync_supers otherwise */
        reiserfs_write_unlock(s);
        return 0;
 }
 
-static void reiserfs_write_super(struct super_block *s)
+static void flush_old_commits(struct work_struct *work)
 {
+       struct reiserfs_sb_info *sbi;
+       struct super_block *s;
+
+       sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
+       s = sbi->s_journal->j_work_sb;
+
+       spin_lock(&sbi->old_work_lock);
+       sbi->work_queued = 0;
+       spin_unlock(&sbi->old_work_lock);
+
        reiserfs_sync_fs(s, 1);
 }
 
+void reiserfs_schedule_old_flush(struct super_block *s)
+{
+       struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+       unsigned long delay;
+
+       if (s->s_flags & MS_RDONLY)
+               return;
+
+       spin_lock(&sbi->old_work_lock);
+       if (!sbi->work_queued) {
+               delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+               queue_delayed_work(system_long_wq, &sbi->old_work, delay);
+               sbi->work_queued = 1;
+       }
+       spin_unlock(&sbi->old_work_lock);
+}
+
+static void cancel_old_flush(struct super_block *s)
+{
+       struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
+       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+       spin_lock(&sbi->old_work_lock);
+       sbi->work_queued = 0;
+       spin_unlock(&sbi->old_work_lock);
+}
+
 static int reiserfs_freeze(struct super_block *s)
 {
        struct reiserfs_transaction_handle th;
+
+       cancel_old_flush(s);
+
        reiserfs_write_lock(s);
        if (!(s->s_flags & MS_RDONLY)) {
                int err = journal_begin(&th, s, 1);
@@ -99,7 +137,6 @@ static int reiserfs_freeze(struct super_block *s)
                        journal_end_sync(&th, s, 1);
                }
        }
-       s->s_dirt = 0;
        reiserfs_write_unlock(s);
        return 0;
 }
@@ -483,9 +520,6 @@ static void reiserfs_put_super(struct super_block *s)
 
        reiserfs_write_lock(s);
 
-       if (s->s_dirt)
-               reiserfs_write_super(s);
-
        /* change file system state to current state if it was mounted with read-write permissions */
        if (!(s->s_flags & MS_RDONLY)) {
                if (!journal_begin(&th, s, 10)) {
@@ -692,7 +726,6 @@ static const struct super_operations reiserfs_sops = {
        .dirty_inode = reiserfs_dirty_inode,
        .evict_inode = reiserfs_evict_inode,
        .put_super = reiserfs_put_super,
-       .write_super = reiserfs_write_super,
        .sync_fs = reiserfs_sync_fs,
        .freeze_fs = reiserfs_freeze,
        .unfreeze_fs = reiserfs_unfreeze,
@@ -1400,7 +1433,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        err = journal_end(&th, s, 10);
        if (err)
                goto out_err;
-       s->s_dirt = 0;
 
        if (!(*mount_flags & MS_RDONLY)) {
                dquot_resume(s, -1);
@@ -1730,19 +1762,21 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                return -ENOMEM;
        s->s_fs_info = sbi;
        /* Set default values for options: non-aggressive tails, RO on errors */
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
+       sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+       sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+       sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
        /* no preallocation minimum, be smart in
           reiserfs_file_write instead */
-       REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+       sbi->s_alloc_options.preallocmin = 0;
        /* Preallocate by 16 blocks (17-1) at once */
-       REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
+       sbi->s_alloc_options.preallocsize = 17;
        /* setup default block allocator options */
        reiserfs_init_alloc_options(s);
 
-       mutex_init(&REISERFS_SB(s)->lock);
-       REISERFS_SB(s)->lock_depth = -1;
+       spin_lock_init(&sbi->old_work_lock);
+       INIT_DELAYED_WORK(&sbi->old_work, flush_old_commits);
+       mutex_init(&sbi->lock);
+       sbi->lock_depth = -1;
 
        jdev_name = NULL;
        if (reiserfs_parse_options
@@ -1751,8 +1785,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                goto error_unlocked;
        }
        if (jdev_name && jdev_name[0]) {
-               REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
-               if (!REISERFS_SB(s)->s_jdev) {
+               sbi->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
+               if (!sbi->s_jdev) {
                        SWARN(silent, s, "", "Cannot allocate memory for "
                                "journal device name");
                        goto error;
@@ -1810,7 +1844,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        /* make data=ordered the default */
        if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
            !reiserfs_data_writeback(s)) {
-               REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
+               sbi->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
        }
 
        if (reiserfs_data_log(s)) {
@@ -2003,6 +2037,8 @@ error_unlocked:
                reiserfs_write_unlock(s);
        }
 
+       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+
        reiserfs_free_bitmap_cache(s);
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
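
The reiserfs changes above drop ->write_super()/s_dirt in favour of a self-throttling delayed work item: journal_mark_dirty() and do_journal_end() call reiserfs_schedule_old_flush(), which queues old_work at most once until it has run. A generic sketch of that throttled-delayed-work idiom, with invented "example_*" names rather than the reiserfs structures:

/*
 * Throttled delayed work: a spinlock-protected flag guarantees at most
 * one queued instance; the work clears the flag before doing the flush.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/writeback.h>	/* dirty_writeback_interval */
#include <linux/jiffies.h>

struct example_dev {
	spinlock_t		work_lock;
	int			work_queued;
	struct delayed_work	flush_work;
};

static void example_flush(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, flush_work.work);

	spin_lock(&dev->work_lock);
	dev->work_queued = 0;
	spin_unlock(&dev->work_lock);

	/* ... write back whatever got dirty ... */
}

static void example_schedule_flush(struct example_dev *dev)
{
	spin_lock(&dev->work_lock);
	if (!dev->work_queued) {
		queue_delayed_work(system_long_wq, &dev->flush_work,
				   msecs_to_jiffies(dirty_writeback_interval * 10));
		dev->work_queued = 1;
	}
	spin_unlock(&dev->work_lock);
}

static void example_init(struct example_dev *dev)
{
	spin_lock_init(&dev->work_lock);
	INIT_DELAYED_WORK(&dev->flush_work, example_flush);
}

static void example_shutdown(struct example_dev *dev)
{
	cancel_delayed_work_sync(&dev->flush_work);
	dev->work_queued = 0;
}
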
index 17d33d09fc16f4843c72f9c7444dfdb3fb738952..bae321569dfa7283b290a2e2d65c99bb10b622f4 100644 (file)
@@ -614,7 +614,6 @@ SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
        return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
@@ -686,7 +685,6 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
 
        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef __ARCH_WANT_SYS_OLD_SELECT
 struct sel_arg_struct {
@@ -941,7 +939,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
        return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
@@ -992,4 +989,3 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
 
        return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
index 7ae2a574cb25a64902128f53832b317202dbee8f..9f35a37173de0de1f7fbd8d80ca8ad39b50e3782 100644 (file)
@@ -269,12 +269,13 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                if (ufd < 0)
                        kfree(ctx);
        } else {
-               struct file *file = fget(ufd);
+               int fput_needed;
+               struct file *file = fget_light(ufd, &fput_needed);
                if (!file)
                        return -EBADF;
                ctx = file->private_data;
                if (file->f_op != &signalfd_fops) {
-                       fput(file);
+                       fput_light(file, fput_needed);
                        return -EINVAL;
                }
                spin_lock_irq(&current->sighand->siglock);
@@ -282,7 +283,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                spin_unlock_irq(&current->sighand->siglock);
 
                wake_up(&current->sighand->signalfd_wqh);
-               fput(file);
+               fput_light(file, fput_needed);
        }
 
        return ufd;
index 406ef2b792c293d709aa164481d20f7ee37b2ed4..7bf08fa22ec9ab122bad5438a02bd78db6f1e885 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1003,8 +1007,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
                ret = file_remove_suid(out);
                if (!ret) {
-                       file_update_time(out);
-                       ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
+                       ret = file_update_time(out);
+                       if (!ret)
+                               ret = splice_from_pipe_feed(pipe, &sd,
+                                                           pipe_to_file);
                }
                mutex_unlock(&inode->i_mutex);
        } while (ret > 0);
@@ -1612,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1627,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
 
index 43e6b6fe4e855684a197c48ed6bb8dee70f95467..95ad5c0e586c9f64fe492e141387b5092956d553 100644 (file)
@@ -87,11 +87,12 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-       struct file *file = fget(fd);
+       int fput_needed;
+       struct file *file = fget_light(fd, &fput_needed);
        int error = -EBADF;
        if (file) {
                error = vfs_statfs(&file->f_path, st);
-               fput(file);
+               fput_light(file, fput_needed);
        }
        return error;
 }
index 0e8db939d96f8fdaa072df7e6fcadb15a5781a84..11e3d1c449018dcf9a95c352746f46d6522c4cb2 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -188,11 +188,12 @@ static int do_fsync(unsigned int fd, int datasync)
 {
        struct file *file;
        int ret = -EBADF;
+       int fput_needed;
 
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (file) {
                ret = vfs_fsync(file, datasync);
-               fput(file);
+               fput_light(file, fput_needed);
        }
        return ret;
 }
index 685a83756b2b7df6e93373083324839ff14bac54..92df3b08153901350a433f8b95c95cd493479aad 100644 (file)
@@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
        struct dentry *dent;
        struct ubifs_debug_info *d = c->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
                     c->vi.ubi_num, c->vi.vol_id);
        if (n == UBIFS_DFS_DIR_LEN) {
@@ -3010,7 +3013,8 @@ out:
  */
 void dbg_debugfs_exit_fs(struct ubifs_info *c)
 {
-       debugfs_remove_recursive(c->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(c->dbg->dfs_dir);
 }
 
 struct ubifs_global_debug_info ubifs_dbg;
@@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void)
        const char *fname;
        struct dentry *dent;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        fname = "ubifs";
        dent = debugfs_create_dir(fname, NULL);
        if (IS_ERR_OR_NULL(dent))
@@ -3159,7 +3166,8 @@ out:
  */
 void dbg_debugfs_exit(void)
 {
-       debugfs_remove_recursive(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(dfs_rootdir);
 }
 
 /**
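
The UBIFS debugfs hunks above rely on IS_ENABLED() so the code compiles identically with and without CONFIG_DEBUG_FS, letting the compiler discard the disabled branch instead of hiding it behind #ifdef. A tiny sketch of the same guard on an invented init/exit pair:

/*
 * IS_ENABLED() guard: the debugfs calls stay visible to the compiler
 * even when CONFIG_DEBUG_FS is off (they become stubs), so no #ifdef
 * blocks are needed. All "example" names are invented.
 */
#include <linux/kconfig.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_dir;

static int example_debugfs_init(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	example_dir = debugfs_create_dir("example", NULL);
	return IS_ERR_OR_NULL(example_dir) ? -ENODEV : 0;
}

static void example_debugfs_exit(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		debugfs_remove_recursive(example_dir);
}
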
index 62a2727f4ecf71809f518dd206922fc7f9234f0d..a6d42efc76d227d62289f852982160442d7e5cea 100644 (file)
@@ -1127,16 +1127,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct ubifs_inode *ui = ubifs_inode(inode);
 
        mutex_lock(&ui->ui_mutex);
-       stat->dev = inode->i_sb->s_dev;
-       stat->ino = inode->i_ino;
-       stat->mode = inode->i_mode;
-       stat->nlink = inode->i_nlink;
-       stat->uid = inode->i_uid;
-       stat->gid = inode->i_gid;
-       stat->rdev = inode->i_rdev;
-       stat->atime = inode->i_atime;
-       stat->mtime = inode->i_mtime;
-       stat->ctime = inode->i_ctime;
+       generic_fillattr(inode, stat);
        stat->blksize = UBIFS_BLOCK_SIZE;
        stat->size = ui->ui_size;
 
index 2559d174e0040a45578fcc2e7612522a08e99a2f..28ec13af28d91c360c89c68f7af0f1957e79477a 100644 (file)
@@ -939,8 +939,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c)
        }
        dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
                 lp->free, lp->flags);
-       ubifs_assert(lp->flags | LPROPS_TAKEN);
-       ubifs_assert(lp->flags | LPROPS_INDEX);
+       ubifs_assert(lp->flags & LPROPS_TAKEN);
+       ubifs_assert(lp->flags & LPROPS_INDEX);
        return lnum;
 }
 
index ef3d1ba6d992b8ffb9ab0d57e633a22d586758d2..15e2fc5aa60bd4790a86c828aaea6995b17e1737 100644 (file)
@@ -718,8 +718,12 @@ static int fixup_free_space(struct ubifs_info *c)
                lnum = ubifs_next_log_lnum(c, lnum);
        }
 
-       /* Fixup the current log head */
-       err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+       /*
+        * Fixup the log head, which contains only a CS node at the
+        * beginning.
+        */
+       err = fixup_leb(c, c->lhead_lnum,
+                       ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
        if (err)
                goto out;
 
index a165c66e3eef2249379890c60a4c7d4111e8df4d..18024178ac4c040a3f23181ff2dc1a5cc48f2dc8 100644 (file)
@@ -1260,16 +1260,15 @@ static struct dentry *udf_fh_to_parent(struct super_block *sb,
                                 fid->udf.parent_partref,
                                 fid->udf.parent_generation);
 }
-static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
-                        int connectable)
+static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+                        struct inode *parent)
 {
        int len = *lenp;
-       struct inode *inode =  de->d_inode;
        struct kernel_lb_addr location = UDF_I(inode)->i_location;
        struct fid *fid = (struct fid *)fh;
        int type = FILEID_UDF_WITHOUT_PARENT;
 
-       if (connectable && (len < 5)) {
+       if (parent && (len < 5)) {
                *lenp = 5;
                return 255;
        } else if (len < 3) {
@@ -1282,14 +1281,11 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
        fid->udf.partref = location.partitionReferenceNum;
        fid->udf.generation = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               spin_lock(&de->d_lock);
-               inode = de->d_parent->d_inode;
-               location = UDF_I(inode)->i_location;
+       if (parent) {
+               location = UDF_I(parent)->i_location;
                fid->udf.parent_block = location.logicalBlockNum;
                fid->udf.parent_partref = location.partitionReferenceNum;
                fid->udf.parent_generation = inode->i_generation;
-               spin_unlock(&de->d_lock);
                *lenp = 5;
                type = FILEID_UDF_WITH_PARENT;
        }
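
reiserfs_encode_fh() and udf_encode_fh() above are adapted to the new ->encode_fh() prototype, which receives the inode and an optional parent inode instead of a dentry plus a "connectable" flag, so implementations no longer have to take d_lock to peek at d_parent. A sketch of a minimal implementation on the new prototype; the 2/4-word handle layout and "example" names are invented:

/*
 * Minimal ->encode_fh() on the new (inode, parent) prototype. Returning
 * 255 with an updated *lenp signals "buffer too small", as in the
 * implementations above.
 */
#include <linux/exportfs.h>
#include <linux/fs.h>

static int example_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
			     struct inode *parent)
{
	int len = *lenp;

	if (parent && len < 4) {
		*lenp = 4;
		return 255;
	} else if (len < 2) {
		*lenp = 2;
		return 255;
	}

	fh[0] = inode->i_ino;
	fh[1] = inode->i_generation;
	*lenp = 2;

	if (parent) {
		fh[2] = parent->i_ino;
		fh[3] = parent->i_generation;
		*lenp = 4;
		return FILEID_INO32_GEN_PARENT;
	}
	return FILEID_INO32_GEN;
}

static const struct export_operations example_export_ops = {
	.encode_fh	= example_encode_fh,
	/* .fh_to_dentry and .fh_to_parent omitted from this sketch */
};
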
index ac8a348dcb693bb10a24a930346b82bc7db799f9..8d86a8706c0e4d93bcace5ed039a71388cd966d5 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/seq_file.h>
 #include <linux/bitmap.h>
 #include <linux/crc-itu-t.h>
+#include <linux/log2.h>
 #include <asm/byteorder.h>
 
 #include "udf_sb.h"
@@ -1215,16 +1216,65 @@ out_bh:
        return ret;
 }
 
+static int udf_load_sparable_map(struct super_block *sb,
+                                struct udf_part_map *map,
+                                struct sparablePartitionMap *spm)
+{
+       uint32_t loc;
+       uint16_t ident;
+       struct sparingTable *st;
+       struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
+       int i;
+       struct buffer_head *bh;
+
+       map->s_partition_type = UDF_SPARABLE_MAP15;
+       sdata->s_packet_len = le16_to_cpu(spm->packetLength);
+       if (!is_power_of_2(sdata->s_packet_len)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Invalid packet length %u\n",
+                       (unsigned)sdata->s_packet_len);
+               return -EIO;
+       }
+       if (spm->numSparingTables > 4) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Too many sparing tables (%d)\n",
+                       (int)spm->numSparingTables);
+               return -EIO;
+       }
+
+       for (i = 0; i < spm->numSparingTables; i++) {
+               loc = le32_to_cpu(spm->locSparingTable[i]);
+               bh = udf_read_tagged(sb, loc, loc, &ident);
+               if (!bh)
+                       continue;
+
+               st = (struct sparingTable *)bh->b_data;
+               if (ident != 0 ||
+                   strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
+                           strlen(UDF_ID_SPARING)) ||
+                   sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
+                                                       sb->s_blocksize) {
+                       brelse(bh);
+                       continue;
+               }
+
+               sdata->s_spar_map[i] = bh;
+       }
+       map->s_partition_func = udf_get_pblock_spar15;
+       return 0;
+}
+
 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                               struct kernel_lb_addr *fileset)
 {
        struct logicalVolDesc *lvd;
-       int i, j, offset;
+       int i, offset;
        uint8_t type;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct genericPartitionMap *gpm;
        uint16_t ident;
        struct buffer_head *bh;
+       unsigned int table_len;
        int ret = 0;
 
        bh = udf_read_tagged(sb, block, block, &ident);
@@ -1232,15 +1282,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                return 1;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
-
-       i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-       if (i != 0) {
-               ret = i;
+       table_len = le32_to_cpu(lvd->mapTableLength);
+       if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Partition table too long (%u > %lu)\n", table_len,
+                       sb->s_blocksize - sizeof(*lvd));
                goto out_bh;
        }
 
+       ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+       if (ret)
+               goto out_bh;
+
        for (i = 0, offset = 0;
-            i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
+            i < sbi->s_partitions && offset < table_len;
             i++, offset += gpm->partitionMapLength) {
                struct udf_part_map *map = &sbi->s_partmaps[i];
                gpm = (struct genericPartitionMap *)
@@ -1275,38 +1330,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
-                               uint32_t loc;
-                               struct sparingTable *st;
-                               struct sparablePartitionMap *spm =
-                                       (struct sparablePartitionMap *)gpm;
-
-                               map->s_partition_type = UDF_SPARABLE_MAP15;
-                               map->s_type_specific.s_sparing.s_packet_len =
-                                               le16_to_cpu(spm->packetLength);
-                               for (j = 0; j < spm->numSparingTables; j++) {
-                                       struct buffer_head *bh2;
-
-                                       loc = le32_to_cpu(
-                                               spm->locSparingTable[j]);
-                                       bh2 = udf_read_tagged(sb, loc, loc,
-                                                            &ident);
-                                       map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = bh2;
-
-                                       if (bh2 == NULL)
-                                               continue;
-
-                                       st = (struct sparingTable *)bh2->b_data;
-                                       if (ident != 0 || strncmp(
-                                               st->sparingIdent.ident,
-                                               UDF_ID_SPARING,
-                                               strlen(UDF_ID_SPARING))) {
-                                               brelse(bh2);
-                                               map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = NULL;
-                                       }
-                               }
-                               map->s_partition_func = udf_get_pblock_spar15;
+                               if (udf_load_sparable_map(sb, map,
+                                   (struct sparablePartitionMap *)gpm) < 0)
+                                       goto out_bh;
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
index ba653f3dc1bc9c66010290e53e0bb2a5b8fd94b1..fa4dbe451e278eab0f52bbacc110b157a300cdad 100644 (file)
@@ -140,18 +140,19 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times,
                goto out;
 
        if (filename == NULL && dfd != AT_FDCWD) {
+               int fput_needed;
                struct file *file;
 
                if (flags & AT_SYMLINK_NOFOLLOW)
                        goto out;
 
-               file = fget(dfd);
+               file = fget_light(dfd, &fput_needed);
                error = -EBADF;
                if (!file)
                        goto out;
 
                error = utimes_common(&file->f_path, times);
-               fput(file);
+               fput_light(file, fput_needed);
        } else {
                struct path path;
                int lookup_flags = 0;
index 3c8c1cc333c7c79dfa105049a62d8b6e1c28661c..1d7ac379045879b827b196f0d7a7420fe33c783d 100644 (file)
@@ -399,11 +399,12 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
 SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
                const void __user *,value, size_t, size, int, flags)
 {
+       int fput_needed;
        struct file *f;
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        dentry = f->f_path.dentry;
@@ -413,7 +414,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
                error = setxattr(dentry, name, value, size, flags);
                mnt_drop_write_file(f);
        }
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -486,15 +487,16 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
 SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
                void __user *, value, size_t, size)
 {
+       int fput_needed;
        struct file *f;
        ssize_t error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        audit_inode(NULL, f->f_path.dentry);
        error = getxattr(f->f_path.dentry, name, value, size);
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -566,15 +568,16 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
 
 SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
 {
+       int fput_needed;
        struct file *f;
        ssize_t error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        audit_inode(NULL, f->f_path.dentry);
        error = listxattr(f->f_path.dentry, list, size);
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -634,11 +637,12 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
 
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
+       int fput_needed;
        struct file *f;
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        dentry = f->f_path.dentry;
@@ -648,7 +652,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
                error = removexattr(dentry, name);
                mnt_drop_write_file(f);
        }
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
index a907de565db3bf287f7d1a7894fff23f85ca18d5..4a7286c1dc80d270af40a3733870bb9dd769ee82 100644 (file)
@@ -46,7 +46,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
 }
 
 void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
@@ -65,7 +65,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 }
 
 void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
        void    *ptr;
 
@@ -87,7 +87,7 @@ kmem_free(const void *ptr)
 
 void *
 kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
-            unsigned int __nocast flags)
+            xfs_km_flags_t flags)
 {
        void    *new;
 
@@ -102,7 +102,7 @@ kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
@@ -121,7 +121,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 }
 
 void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
        void    *ptr;
 
index ab7c53fe346e2273311a1b73bee866ab8c4f8d7f..b2f2620f9a87b9f1bf6836c8faf3d5039e7af94f 100644 (file)
  * General memory allocation interfaces
  */
 
-#define KM_SLEEP       0x0001u
-#define KM_NOSLEEP     0x0002u
-#define KM_NOFS                0x0004u
-#define KM_MAYFAIL     0x0008u
+typedef unsigned __bitwise xfs_km_flags_t;
+#define KM_SLEEP       ((__force xfs_km_flags_t)0x0001u)
+#define KM_NOSLEEP     ((__force xfs_km_flags_t)0x0002u)
+#define KM_NOFS                ((__force xfs_km_flags_t)0x0004u)
+#define KM_MAYFAIL     ((__force xfs_km_flags_t)0x0008u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -38,7 +39,7 @@
  * warnings, so we explicitly skip any generic ones (silly of us).
  */
 static inline gfp_t
-kmem_flags_convert(unsigned int __nocast flags)
+kmem_flags_convert(xfs_km_flags_t flags)
 {
        gfp_t   lflags;
 
@@ -54,9 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
        return lflags;
 }
 
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_zalloc(size_t, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
 
 static inline void *kmem_zalloc_large(size_t size)
@@ -107,7 +108,7 @@ kmem_zone_destroy(kmem_zone_t *zone)
                kmem_cache_destroy(zone);
 }
 
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
+extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
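
The xfs_km_flags_t change above reuses the sparse __bitwise/__force idiom that gfp_t is built on; a standalone sketch with invented my_flags_t/MYF_* names:

/*
 * Standalone sketch (not from this commit) of the sparse type-checking
 * idiom xfs_km_flags_t adopts, mirroring how gfp_t is defined.  The
 * my_flags_t and MYF_* names are made up for illustration.
 */
#include <linux/types.h>

typedef unsigned __bitwise my_flags_t;

#define MYF_SLEEP	((__force my_flags_t)0x0001u)
#define MYF_NOFS	((__force my_flags_t)0x0002u)

static inline bool myf_can_sleep(my_flags_t flags)
{
	/*
	 * Bit tests between my_flags_t values stay legal; passing a plain
	 * int (or a gfp_t) where my_flags_t is expected now makes sparse
	 * complain, which is what catches mixed-up callers.
	 */
	return (flags & MYF_SLEEP) != 0;
}
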
index 229641fb8e67af0674df635908611b619ebbd6ca..4f33c32affe3d2eb4bec9c5ca8d71a33bbc5032a 100644 (file)
@@ -1074,12 +1074,13 @@ restart:
         * If we couldn't get anything, give up.
         */
        if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+               xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
                if (!forced++) {
                        trace_xfs_alloc_near_busy(args);
                        xfs_log_force(args->mp, XFS_LOG_SYNC);
                        goto restart;
                }
-
                trace_xfs_alloc_size_neither(args);
                args->agbno = NULLAGBLOCK;
                return 0;
@@ -2433,15 +2434,24 @@ xfs_alloc_vextent_worker(
        current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
-
-int                            /* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
 xfs_alloc_vextent(
-       xfs_alloc_arg_t *args)  /* allocation argument structure */
+       struct xfs_alloc_arg    *args)
 {
        DECLARE_COMPLETION_ONSTACK(done);
 
+       if (!args->userdata)
+               return __xfs_alloc_vextent(args);
+
+
        args->done = &done;
-       INIT_WORK(&args->work, xfs_alloc_vextent_worker);
+       INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
        queue_work(xfs_alloc_wq, &args->work);
        wait_for_completion(&done);
        return args->result;
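
The hunk above keeps the workqueue bounce only for userdata allocations; a hedged sketch, with hypothetical names, of the on-stack work plus completion pattern it relies on:

/*
 * Minimal sketch, not from this commit, of the "bounce onto a workqueue
 * for a fresh stack, then wait synchronously" pattern.  struct deep_args,
 * deep_worker() and run_with_fresh_stack() are hypothetical names.
 */
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct deep_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void deep_worker(struct work_struct *work)
{
	struct deep_args *args = container_of(work, struct deep_args, work);

	args->result = 0;		/* the stack-hungry work runs here */
	complete(args->done);
}

static int run_with_fresh_stack(struct workqueue_struct *wq,
				struct deep_args *args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	/*
	 * The work item lives on the caller's stack; INIT_WORK_ONSTACK
	 * plus the synchronous wait below keep that safe.
	 */
	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, deep_worker);
	queue_work(wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}
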
index ae31c313a79ef6f0f53155a9b83877c6e56fc05a..8dad722c00410f25294f0a8fec608fcbd56e74e8 100644 (file)
@@ -981,10 +981,15 @@ xfs_vm_writepage(
                                imap_valid = 0;
                        }
                } else {
-                       if (PageUptodate(page)) {
+                       if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
-                               imap_valid = 0;
-                       }
+                       /*
+                        * This buffer is not uptodate and will not be
+                        * written to disk.  Ensure that we will put any
+                        * subsequent writeable buffers into a new
+                        * ioend.
+                        */
+                       imap_valid = 0;
                        continue;
                }
 
index 172d3cc8f8cb8ad6d588fea95819ecfded627167..269b35c084dab6906267ccf66f473c98e85ce473 100644 (file)
@@ -201,14 +201,7 @@ xfs_buf_alloc(
        bp->b_length = numblks;
        bp->b_io_length = numblks;
        bp->b_flags = flags;
-
-       /*
-        * We do not set the block number here in the buffer because we have not
-        * finished initialising the buffer. We insert the buffer into the cache
-        * in this state, so this ensures that we are unable to do IO on a
-        * buffer that hasn't been fully initialised.
-        */
-       bp->b_bn = XFS_BUF_DADDR_NULL;
+       bp->b_bn = blkno;
        atomic_set(&bp->b_pin_count, 0);
        init_waitqueue_head(&bp->b_waiters);
 
@@ -567,11 +560,6 @@ xfs_buf_get(
        if (bp != new_bp)
                xfs_buf_free(new_bp);
 
-       /*
-        * Now we have a workable buffer, fill in the block number so
-        * that we can do IO on it.
-        */
-       bp->b_bn = blkno;
        bp->b_io_length = bp->b_length;
 
 found:
@@ -772,7 +760,7 @@ xfs_buf_get_uncached(
        int                     error, i;
        xfs_buf_t               *bp;
 
-       bp = xfs_buf_alloc(target, 0, numblks, 0);
+       bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
        if (unlikely(bp == NULL))
                goto fail;
 
@@ -1001,27 +989,6 @@ xfs_buf_ioerror_alert(
                (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
 }
 
-int
-xfs_bwrite(
-       struct xfs_buf          *bp)
-{
-       int                     error;
-
-       ASSERT(xfs_buf_islocked(bp));
-
-       bp->b_flags |= XBF_WRITE;
-       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
-       xfs_bdstrat_cb(bp);
-
-       error = xfs_buf_iowait(bp);
-       if (error) {
-               xfs_force_shutdown(bp->b_target->bt_mount,
-                                  SHUTDOWN_META_IO_ERROR);
-       }
-       return error;
-}
-
 /*
  * Called when we want to stop a buffer from getting written or read.
  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1091,14 +1058,7 @@ xfs_bioerror_relse(
        return EIO;
 }
 
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
 xfs_bdstrat_cb(
        struct xfs_buf  *bp)
 {
@@ -1119,6 +1079,27 @@ xfs_bdstrat_cb(
        return 0;
 }
 
+int
+xfs_bwrite(
+       struct xfs_buf          *bp)
+{
+       int                     error;
+
+       ASSERT(xfs_buf_islocked(bp));
+
+       bp->b_flags |= XBF_WRITE;
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+       xfs_bdstrat_cb(bp);
+
+       error = xfs_buf_iowait(bp);
+       if (error) {
+               xfs_force_shutdown(bp->b_target->bt_mount,
+                                  SHUTDOWN_META_IO_ERROR);
+       }
+       return error;
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data from going to disk in case
  * we are shutting down the filesystem.  Typically user data goes thru this
@@ -1255,7 +1236,7 @@ xfs_buf_iorequest(
         */
        atomic_set(&bp->b_io_remaining, 1);
        _xfs_buf_ioapply(bp);
-       _xfs_buf_ioend(bp, 0);
+       _xfs_buf_ioend(bp, 1);
 
        xfs_buf_rele(bp);
 }
index 7f1d1392ce37c292a04e8a22268537527687e10f..79344c48008eedaab4aaa21dc6ed28fe328607d3 100644 (file)
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 extern int xfs_bwrite(struct xfs_buf *bp);
 
 extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
 
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
index 45df2b857d482fe478c3f4965ddfa57a648eb6ee..d9e451115f980ac1529c4e89aa89665c82661360 100644 (file)
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
 
                if (!XFS_BUF_ISSTALE(bp)) {
                        bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
-                       xfs_bdstrat_cb(bp);
+                       xfs_buf_iorequest(bp);
                } else {
                        xfs_buf_relse(bp);
                }
index 2d25d19c4ea17b991fa4a43dcbb340e5c957c461..42679223a0fde641e3013980fbd1e733dc6ec60e 100644 (file)
@@ -52,19 +52,18 @@ static int xfs_fileid_length(int fileid_type)
 
 STATIC int
 xfs_fs_encode_fh(
-       struct dentry           *dentry,
-       __u32                   *fh,
-       int                     *max_len,
-       int                     connectable)
+       struct inode    *inode,
+       __u32           *fh,
+       int             *max_len,
+       struct inode    *parent)
 {
        struct fid              *fid = (struct fid *)fh;
        struct xfs_fid64        *fid64 = (struct xfs_fid64 *)fh;
-       struct inode            *inode = dentry->d_inode;
        int                     fileid_type;
        int                     len;
 
        /* Directories don't need their parent encoded, they have ".." */
-       if (S_ISDIR(inode->i_mode) || !connectable)
+       if (!parent)
                fileid_type = FILEID_INO32_GEN;
        else
                fileid_type = FILEID_INO32_GEN_PARENT;
@@ -96,20 +95,16 @@ xfs_fs_encode_fh(
 
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
-               spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-               fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
-               spin_unlock(&dentry->d_lock);
+               fid->i32.parent_ino = XFS_I(parent)->i_ino;
+               fid->i32.parent_gen = parent->i_generation;
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
                fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
-               spin_lock(&dentry->d_lock);
-               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-               fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
-               spin_unlock(&dentry->d_lock);
+               fid64->parent_ino = XFS_I(parent)->i_ino;
+               fid64->parent_gen = parent->i_generation;
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
                fid64->ino = XFS_I(inode)->i_ino;
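
The hunk above adapts XFS to the reworked ->encode_fh() signature, which passes the inode and an optional parent inode instead of a dentry and connectable flag; a hedged sketch for a hypothetical examplefs using the generic 32-bit handle layout:

/*
 * Hedged sketch (not from this commit) of the new ->encode_fh()
 * contract: the VFS hands in the inode and, for connectable handles,
 * the parent inode directly, so no d_lock/d_parent walk is needed.
 * "examplefs" is hypothetical and uses struct fid's i32 layout.
 */
#include <linux/exportfs.h>
#include <linux/fs.h>

static int examplefs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
			       struct inode *parent)
{
	struct fid *fid = (struct fid *)fh;
	int type = parent ? FILEID_INO32_GEN_PARENT : FILEID_INO32_GEN;
	int len = parent ? 4 : 2;	/* handle length in 32-bit words */

	if (*max_len < len) {
		*max_len = len;
		return 255;		/* historical "buffer too small" return */
	}

	fid->i32.ino = inode->i_ino;
	fid->i32.gen = inode->i_generation;
	if (parent) {
		fid->i32.parent_ino = parent->i_ino;
		fid->i32.parent_gen = parent->i_generation;
	}
	*max_len = len;
	return type;
}
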
index 8d214b87f6bb06ed1f7ed204cdfda8da9345172f..9f7ec15a65222e2fe318e0ab81ac9cca0a664b4a 100644 (file)
@@ -586,8 +586,11 @@ restart:
         * lock above.  Eventually we should look into a way to avoid
         * the pointless lock roundtrip.
         */
-       if (likely(!(file->f_mode & FMODE_NOCMTIME)))
-               file_update_time(file);
+       if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
+               error = file_update_time(file);
+               if (error)
+                       return error;
+       }
 
        /*
         * If we're writing the file then make sure to clear the setuid and
index 6cdbf90c6f7b50116534c6cb7de2d1ff91816f91..d041d47d9d86d13718b4c0c89047efe96e098aab 100644 (file)
@@ -504,6 +504,14 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
+       /*
+        * Stale inode items should force out the iclog.
+        */
+       if (ip->i_flags & XFS_ISTALE) {
+               rval = XFS_ITEM_PINNED;
+               goto out_unlock;
+       }
+
        /*
         * Someone else is already flushing the inode.  Nothing we can do
         * here but wait for the flush to finish and remove the item from
@@ -514,15 +522,6 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
-       /*
-        * Stale inode items should force out the iclog.
-        */
-       if (ip->i_flags & XFS_ISTALE) {
-               xfs_ifunlock(ip);
-               xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               return XFS_ITEM_PINNED;
-       }
-
        ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
        ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 
index 6b965bf450e44d5972fc689d486deec3dd5c8094..d90d4a388609af9da0cd40eb786a1fdd225a8d62 100644 (file)
 kmem_zone_t    *xfs_log_ticket_zone;
 
 /* Local miscellaneous function prototypes */
-STATIC int      xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
-                                   xlog_in_core_t **, xfs_lsn_t *);
+STATIC int
+xlog_commit_record(
+       struct xlog             *log,
+       struct xlog_ticket      *ticket,
+       struct xlog_in_core     **iclog,
+       xfs_lsn_t               *commitlsnp);
+
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t    *mp,
                                xfs_buftarg_t   *log_target,
                                xfs_daddr_t     blk_offset,
                                int             num_bblks);
-STATIC int      xlog_space_left(struct log *log, atomic64_t *head);
+STATIC int
+xlog_space_left(
+       struct xlog             *log,
+       atomic64_t              *head);
 STATIC int      xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void     xlog_dealloc_log(xlog_t *log);
 
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t          *log,
                                     int                eventual_size);
 STATIC void xlog_state_want_sync(xlog_t        *log, xlog_in_core_t *iclog);
 
-STATIC void xlog_grant_push_ail(struct log     *log,
-                               int             need_bytes);
+STATIC void
+xlog_grant_push_ail(
+       struct xlog     *log,
+       int             need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t       *log,
                                           xlog_ticket_t *ticket);
 STATIC void xlog_ungrant_log_space(xlog_t       *log,
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t      *log,
 
 #if defined(DEBUG)
 STATIC void    xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void    xlog_verify_grant_tail(struct log *log);
+STATIC void
+xlog_verify_grant_tail(
+       struct xlog     *log);
 STATIC void    xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
                                  int count, boolean_t syncing);
 STATIC void    xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@ STATIC int   xlog_iclogs_empty(xlog_t *log);
 
 static void
 xlog_grant_sub_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -115,9 +127,9 @@ xlog_grant_sub_space(
 
 static void
 xlog_grant_add_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all(
 
 static inline int
 xlog_ticket_reservation(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic)
 {
@@ -182,7 +194,7 @@ xlog_ticket_reservation(
 
 STATIC bool
 xlog_grant_head_wake(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        int                     *free_bytes)
 {
@@ -204,7 +216,7 @@ xlog_grant_head_wake(
 
 STATIC int
 xlog_grant_head_wait(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     need_bytes)
@@ -256,7 +268,7 @@ shutdown:
  */
 STATIC int
 xlog_grant_head_check(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     *need_bytes)
@@ -323,7 +335,7 @@ xfs_log_regrant(
        struct xfs_mount        *mp,
        struct xlog_ticket      *tic)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     need_bytes;
        int                     error = 0;
 
@@ -389,7 +401,7 @@ xfs_log_reserve(
        bool                    permanent,
        uint                    t_type)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_ticket      *tic;
        int                     need_bytes;
        int                     error = 0;
@@ -465,7 +477,7 @@ xfs_log_done(
        struct xlog_in_core     **iclog,
        uint                    flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        xfs_lsn_t               lsn = 0;
 
        if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
+       cancel_delayed_work_sync(&mp->m_sync_work);
        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
 }
@@ -838,7 +851,7 @@ void
 xfs_log_space_wake(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     free_bytes;
 
        if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@ xfs_lsn_t
 xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xfs_log_item     *lip;
        xfs_lsn_t               tail_lsn;
 
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn(
  */
 STATIC int
 xlog_space_left(
-       struct log      *log,
+       struct xlog     *log,
        atomic64_t      *head)
 {
        int             free_bytes;
@@ -1277,7 +1290,7 @@ out:
  */
 STATIC int
 xlog_commit_record(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        xfs_lsn_t               *commitlsnp)
@@ -1311,7 +1324,7 @@ xlog_commit_record(
  */
 STATIC void
 xlog_grant_push_ail(
-       struct log      *log,
+       struct xlog     *log,
        int             need_bytes)
 {
        xfs_lsn_t       threshold_lsn = 0;
@@ -1790,7 +1803,7 @@ xlog_write_start_rec(
 
 static xlog_op_header_t *
 xlog_write_setup_ophdr(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_op_header   *ophdr,
        struct xlog_ticket      *ticket,
        uint                    flags)
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy(
 
 static int
 xlog_write_copy_finish(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_in_core     *iclog,
        uint                    flags,
        int                     *record_cnt,
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish(
  */
 int
 xlog_write(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket,
        xfs_lsn_t               *start_lsn,
@@ -2821,7 +2834,7 @@ _xfs_log_force(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        xfs_lsn_t               lsn;
 
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        int                     already_slept = 0;
 
@@ -3147,12 +3160,12 @@ xfs_log_ticket_get(
  */
 xlog_ticket_t *
 xlog_ticket_alloc(
-       struct log      *log,
+       struct xlog     *log,
        int             unit_bytes,
        int             cnt,
        char            client,
        bool            permanent,
-       int             alloc_flags)
+       xfs_km_flags_t  alloc_flags)
 {
        struct xlog_ticket *tic;
        uint            num_headers;
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc(
  */
 void
 xlog_verify_dest_ptr(
-       struct log      *log,
+       struct xlog     *log,
        char            *ptr)
 {
        int i;
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr(
  */
 STATIC void
 xlog_verify_grant_tail(
-       struct log      *log)
+       struct xlog     *log)
 {
        int             tail_cycle, tail_blocks;
        int             cycle, space;
index 7d6197c5849381e8d8f0f0e6fbef91f332f4e108..ddc4529d07d32fe69b2d8d54b7699f19768198d4 100644 (file)
@@ -44,7 +44,7 @@
  */
 static struct xlog_ticket *
 xlog_cil_ticket_alloc(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xlog_ticket *tic;
 
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc(
  */
 void
 xlog_cil_init_post_recovery(
-       struct log      *log)
+       struct xlog     *log)
 {
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs(
  */
 STATIC void
 xfs_cil_prepare_item(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *lv,
        int                     *len,
        int                     *diff_iovecs)
@@ -231,7 +231,7 @@ xfs_cil_prepare_item(
  */
 static void
 xlog_cil_insert_items(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket)
 {
@@ -373,7 +373,7 @@ xlog_cil_committed(
  */
 STATIC int
 xlog_cil_push(
-       struct log              *log)
+       struct xlog             *log)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
@@ -601,7 +601,7 @@ xlog_cil_push_work(
  */
 static void
 xlog_cil_push_background(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil = log->l_cilp;
 
@@ -629,7 +629,7 @@ xlog_cil_push_background(
 
 static void
 xlog_cil_push_foreground(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       push_seq)
 {
        struct xfs_cil  *cil = log->l_cilp;
@@ -683,7 +683,7 @@ xfs_log_commit_cil(
        xfs_lsn_t               *commit_lsn,
        int                     flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     log_flags = 0;
        struct xfs_log_vec      *log_vector;
 
@@ -754,7 +754,7 @@ xfs_log_commit_cil(
  */
 xfs_lsn_t
 xlog_cil_force_lsn(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       sequence)
 {
        struct xfs_cil          *cil = log->l_cilp;
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt(
  */
 int
 xlog_cil_init(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@ xlog_cil_init(
 
 void
 xlog_cil_destroy(
-       struct log      *log)
+       struct xlog     *log)
 {
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
index 735ff1ee53da447eee9c5b88d54e9007ee988138..72eba2201b1449aee4d52f5c0863549c9aedf7e9 100644 (file)
@@ -19,7 +19,7 @@
 #define __XFS_LOG_PRIV_H__
 
 struct xfs_buf;
-struct log;
+struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
 
@@ -352,7 +352,7 @@ typedef struct xlog_in_core {
        struct xlog_in_core     *ic_next;
        struct xlog_in_core     *ic_prev;
        struct xfs_buf          *ic_bp;
-       struct log              *ic_log;
+       struct xlog             *ic_log;
        int                     ic_size;
        int                     ic_offset;
        int                     ic_bwritecnt;
@@ -409,7 +409,7 @@ struct xfs_cil_ctx {
  * operations almost as efficient as the old logging methods.
  */
 struct xfs_cil {
-       struct log              *xc_log;
+       struct xlog             *xc_log;
        struct list_head        xc_cil;
        spinlock_t              xc_cil_lock;
        struct xfs_cil_ctx      *xc_ctx;
@@ -487,7 +487,7 @@ struct xlog_grant_head {
  * overflow 31 bits worth of byte offset, so using a byte number will mean
  * that round off problems won't occur when releasing partial reservations.
  */
-typedef struct log {
+typedef struct xlog {
        /* The following fields don't need locking */
        struct xfs_mount        *l_mp;          /* mount point */
        struct xfs_ail          *l_ailp;        /* AIL log is working with */
@@ -553,9 +553,14 @@ extern int  xlog_recover_finish(xlog_t *log);
 extern void     xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 
 extern kmem_zone_t *xfs_log_ticket_zone;
-struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
-                               int count, char client, bool permanent,
-                               int alloc_flags);
+struct xlog_ticket *
+xlog_ticket_alloc(
+       struct xlog     *log,
+       int             unit_bytes,
+       int             count,
+       char            client,
+       bool            permanent,
+       xfs_km_flags_t  alloc_flags);
 
 
 static inline void
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
 }
 
 void   xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
-int    xlog_write(struct log *log, struct xfs_log_vec *log_vector,
-                               struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
-                               xlog_in_core_t **commit_iclog, uint flags);
+int
+xlog_write(
+       struct xlog             *log,
+       struct xfs_log_vec      *log_vector,
+       struct xlog_ticket      *tic,
+       xfs_lsn_t               *start_lsn,
+       struct xlog_in_core     **commit_iclog,
+       uint                    flags);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
 /*
  * Committed Item List interfaces
  */
-int    xlog_cil_init(struct log *log);
-void   xlog_cil_init_post_recovery(struct log *log);
-void   xlog_cil_destroy(struct log *log);
+int
+xlog_cil_init(struct xlog *log);
+void
+xlog_cil_init_post_recovery(struct xlog *log);
+void
+xlog_cil_destroy(struct xlog *log);
 
 /*
  * CIL force routines
  */
-xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+xfs_lsn_t
+xlog_cil_force_lsn(
+       struct xlog *log,
+       xfs_lsn_t sequence);
 
 static inline void
-xlog_cil_force(struct log *log)
+xlog_cil_force(struct xlog *log)
 {
        xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
 }
index ca386909131a3f556a14868ee9354db352e3fe8f..a7be98abd6a90327ae9a0993ef65f2bc7f005e1c 100644 (file)
@@ -1471,8 +1471,8 @@ xlog_recover_add_item(
 
 STATIC int
 xlog_recover_add_to_cont_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans(
  */
 STATIC int
 xlog_recover_add_to_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans(
  */
 STATIC int
 xlog_recover_reorder_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        int                     pass)
 {
        xlog_recover_item_t     *item, *n;
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans(
  */
 STATIC int
 xlog_recover_buffer_pass1(
-       struct log              *log,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
 {
        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
        struct list_head        *bucket;
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1(
  */
 STATIC int
 xlog_check_buffer_cancelled(
-       struct log              *log,
+       struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
        ushort                  flags)
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans(
 
 STATIC int
 xlog_recover_commit_pass1(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
 
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1(
 
 STATIC int
 xlog_recover_commit_pass2(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       struct list_head        *buffer_list,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct list_head                *buffer_list,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
 
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2(
  */
 STATIC int
 xlog_recover_commit_trans(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_recover     *trans,
        int                     pass)
 {
@@ -2793,8 +2793,8 @@ out:
 
 STATIC int
 xlog_recover_unmount_trans(
-       struct log              *log,
-       xlog_recover_t          *trans)
+       struct xlog             *log,
+       struct xlog_recover     *trans)
 {
        /* Do nothing now */
        xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
index 8b89c5ac72d9bf777b48e3e653736507bd04b446..90c1fc9eaea4d9c7be48c5ef219e7598f6e47f54 100644 (file)
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations {
 
 #include "xfs_sync.h"
 
-struct log;
+struct xlog;
 struct xfs_mount_args;
 struct xfs_inode;
 struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@ typedef struct xfs_mount {
        uint                    m_readio_blocks; /* min read size blocks */
        uint                    m_writeio_log;  /* min write size log bytes */
        uint                    m_writeio_blocks; /* min write size blocks */
-       struct log              *m_log;         /* log specific stuff */
+       struct xlog             *m_log;         /* log specific stuff */
        int                     m_logbufs;      /* number of log buffers */
        int                     m_logbsize;     /* size of each log buffer */
        uint                    m_rsumlevels;   /* rt summary levels */
index c9d3409c5ca3f991b7bde133994c4920316a216c..1e9ee064dbb28c7cb491d4c41dce53007eebc7e9 100644 (file)
@@ -386,23 +386,23 @@ xfs_sync_worker(
         * We shouldn't write/force the log if we are in the mount/unmount
         * process or on a read only filesystem. The workqueue still needs to be
         * active in both cases, however, because it is used for inode reclaim
-        * during these times.  Use the s_umount semaphore to provide exclusion
-        * with unmount.
+        * during these times.  Use the MS_ACTIVE flag to avoid doing anything
+        * during mount.  Doing work during unmount is avoided by calling
+        * cancel_delayed_work_sync on this work queue before tearing down
+        * the ail and the log in xfs_log_unmount.
         */
-       if (down_read_trylock(&mp->m_super->s_umount)) {
-               if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-                       /* dgc: errors ignored here */
-                       if (mp->m_super->s_frozen == SB_UNFROZEN &&
-                           xfs_log_need_covered(mp))
-                               error = xfs_fs_log_dummy(mp);
-                       else
-                               xfs_log_force(mp, 0);
-
-                       /* start pushing all the metadata that is currently
-                        * dirty */
-                       xfs_ail_push_all(mp->m_ail);
-               }
-               up_read(&mp->m_super->s_umount);
+       if (!(mp->m_super->s_flags & MS_ACTIVE) &&
+           !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               /* dgc: errors ignored here */
+               if (mp->m_super->s_frozen == SB_UNFROZEN &&
+                   xfs_log_need_covered(mp))
+                       error = xfs_fs_log_dummy(mp);
+               else
+                       xfs_log_force(mp, 0);
+
+               /* start pushing all the metadata that is currently
+                * dirty */
+               xfs_ail_push_all(mp->m_ail);
        }
 
        /* queue us up again */
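
A minimal sketch, with hypothetical names, of the teardown ordering the new comment depends on: cancel and flush the delayed work before the state it touches is destroyed:

/*
 * Sketch only, not from this commit.  The periodic work is cancelled
 * and flushed before the structures it operates on go away, so the
 * worker can never run against freed state.
 */
#include <linux/workqueue.h>

struct examplefs_mount {
	struct delayed_work	sync_work;
	/* ... AIL and log state the worker pushes on ... */
};

static void examplefs_unmount(struct examplefs_mount *mp)
{
	/* Returns only once any in-flight sync_work has finished. */
	cancel_delayed_work_sync(&mp->sync_work);

	/* ... now safe to tear down the AIL and the log ... */
}
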
index 7cf9d3529e5112c39c25e007bb65c9c1774759ec..caf5dabfd55347b292664e86654f04bbe62d7a98 100644 (file)
@@ -32,7 +32,7 @@ struct xfs_da_node_entry;
 struct xfs_dquot;
 struct xfs_log_item;
 struct xlog_ticket;
-struct log;
+struct xlog;
 struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force);
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);
 
 DECLARE_EVENT_CLASS(xfs_loggrant_class,
-       TP_PROTO(struct log *log, struct xlog_ticket *tic),
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
 
 #define DEFINE_LOGGRANT_EVENT(name) \
 DEFINE_EVENT(xfs_loggrant_class, name, \
-       TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
        TP_ARGS(log, tic))
 DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
 DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
-       TP_PROTO(struct log *log, struct xlog_recover *trans,
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans,
                struct xlog_recover_item *item, int pass),
        TP_ARGS(log, trans, item, pass),
        TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
 
 #define DEFINE_LOG_RECOVER_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_item_class, name, \
-       TP_PROTO(struct log *log, struct xlog_recover *trans, \
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
                struct xlog_recover_item *item, int pass), \
        TP_ARGS(log, trans, item, pass))
 
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
 DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
        TP_ARGS(log, buf_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
 
 #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
        TP_ARGS(log, buf_f))
 
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
        TP_ARGS(log, in_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
 )
 #define DEFINE_LOG_RECOVER_INO_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
        TP_ARGS(log, in_f))
 
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
index cdf896fcbfa43810c83bfbc7a99f84f118a4d75d..fdf324508c5ee467c6055f0866e1be88c387942d 100644 (file)
@@ -584,7 +584,7 @@ xfs_trans_t *
 _xfs_trans_alloc(
        xfs_mount_t     *mp,
        uint            type,
-       uint            memflags)
+       xfs_km_flags_t  memflags)
 {
        xfs_trans_t     *tp;
 
index 7ab99e1898c8de10e875aff23599d140c9867c4b..7c37b533aa8e5c169f0ef98643f96958df3788df 100644 (file)
@@ -443,7 +443,7 @@ typedef struct xfs_trans {
  * XFS transaction mechanism exported interfaces.
  */
 xfs_trans_t    *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, uint);
+xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
 xfs_trans_t    *xfs_trans_dup(xfs_trans_t *);
 int            xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
                                  uint, uint);
index b0d62820ada173fb23066a611532934b926db99d..9e6e1c6eb60a94885f1e9c08038cc8e081f86547 100644 (file)
@@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 
 #else  /* CONFIG_ACPI */
 
-static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
-static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
+static inline int register_acpi_bus_type(void *bus) { return 0; }
+static inline int unregister_acpi_bus_type(void *bus) { return 0; }
 
 #endif                         /* CONFIG_ACPI */
 
index 53f91b1ae53a425f0c5c2b487dcb51e658affa77..2c85a0f647b7a44e7d2e0cbb20071a9e27b322f6 100644 (file)
@@ -8,6 +8,7 @@ header-y += int-ll64.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
+header-y += kvm_para.h
 header-y += mman-common.h
 header-y += mman.h
 header-y += msgbuf.h
index 4ae54e07de83d2b970d88807f662bcfe300f2ddc..a7b0914348fd0fce08ed7cb42bbe2c8fbacbf8f9 100644 (file)
@@ -28,5 +28,9 @@
 #error Inconsistent word size. Check asm/bitsperlong.h
 #endif
 
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_BITS_PER_LONG */
index 2520a6e241dc0967ef7f8e096436a19a6042e36a..7d10f962aa137776389daf226b42b8be9702e1bd 100644 (file)
@@ -3,10 +3,18 @@
 
 #include <linux/compiler.h>
 
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING                (1 << 0)
+#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
-#ifndef __ASSEMBLY__
 struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        unsigned long   bug_addr;
@@ -23,12 +31,6 @@ struct bug_entry {
 #endif
        unsigned short  flags;
 };
-#endif         /* __ASSEMBLY__ */
-
-#define BUGFLAG_WARNING                (1 << 0)
-#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
-#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
-
 #endif /* CONFIG_GENERIC_BUG */
 
 /*
@@ -60,7 +62,6 @@ struct bug_entry {
  * to provide better diagnostics.
  */
 #ifndef __WARN_TAINT
-#ifndef __ASSEMBLY__
 extern __printf(3, 4)
 void warn_slowpath_fmt(const char *file, const int line,
                       const char *fmt, ...);
@@ -69,7 +70,6 @@ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
                             const char *fmt, ...);
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
-#endif
 #define __WARN()               warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)  warn_slowpath_fmt(__FILE__, __LINE__, arg)
 #define __WARN_printf_taint(taint, arg...)                             \
@@ -202,4 +202,6 @@ extern void warn_slowpath_null(const char *file, const int line);
 # define WARN_ON_SMP(x)                        ({0;})
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif
index c544356b374b62285b325c2df1ab77008c2422d1..294b1e755ab26e7bb47d9f1800ae132fc6667950 100644 (file)
@@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
 {
        if (dev)
                dev->cma_area = cma;
-       if (!dev || !dma_contiguous_default_area)
+       if (!dev && !dma_contiguous_default_area)
                dma_contiguous_default_area = cma;
 }
 
index 6f2b45a9b6bc425b7df6231f474516e1bd4c1344..ff4947b7a9762b6ea414aa8b0159af8abb90e9dc 100644 (file)
@@ -484,6 +484,16 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
+        *
+        * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+        * pmd_read_atomic is allowed to return a not atomic pmdval
+        * (for example pointing to an hugepage that has never been
+        * mapped in the pmd). The below checks will only care about
+        * the low part of the pmd with 32bit PAE x86 anyway, with the
+        * exception of pmd_none(). So the important thing is that if
+        * the low part of the pmd is found null, the high part will
+        * be also null or the pmd_none() check below would be
+        * confused.
         */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
index 91d44bd4dde32574bb6365a5526ac33a19992050..fe74fccf18db75742d240151ec358049861b7406 100644 (file)
@@ -23,10 +23,6 @@ typedef __kernel_ulong_t __kernel_ino_t;
 typedef unsigned int   __kernel_mode_t;
 #endif
 
-#ifndef __kernel_nlink_t
-typedef __kernel_ulong_t __kernel_nlink_t;
-#endif
-
 #ifndef __kernel_pid_t
 typedef int            __kernel_pid_t;
 #endif
index 73e45600f95def601badd64689d75a117ddcc579..bac55c2151131c9689e284a4b4a8b438ed223039 100644 (file)
@@ -54,7 +54,7 @@ struct drm_mode_object {
        struct drm_object_properties *properties;
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 16
+#define DRM_OBJECT_MAX_PROPERTY 24
 struct drm_object_properties {
        int count;
        uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
index 6bd325fedc873ae4785a619e8a679f339d562485..19a240446fca657e9928e93defa3ead34905ee56 100644 (file)
@@ -31,7 +31,7 @@
 
 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 {
-       if (size != 0 && nmemb > ULONG_MAX / size)
+       if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;
 
        if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
 static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
 {
-       if (size != 0 && nmemb > ULONG_MAX / size)
+       if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;
 
        if (size * nmemb <= PAGE_SIZE)
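
The guard above bounds the multiplication by SIZE_MAX because the product is computed in size_t; a userspace-compilable illustration:

/*
 * Userspace illustration (not kernel code) of the overflow guard: the
 * product nmemb * size is computed in size_t, so SIZE_MAX is the bound
 * that detects wraparound before it happens.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *calloc_like(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;			/* nmemb * size would wrap */
	return malloc(nmemb * size);
}

int main(void)
{
	/* SIZE_MAX / 2 eight-byte elements would wrap without the check. */
	void *p = calloc_like(SIZE_MAX / 2, 8);

	printf("guarded allocation returned %s\n", p ? "a buffer" : "NULL");
	free(p);
	return 0;
}
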
index 58d0bdab68dd081ae8daa6a06c19eef5bb47e4fe..a7aec391b7b7d68d59876f9f5bcde4888d257e87 100644 (file)
@@ -1,7 +1,3 @@
-/*
-   This file is auto-generated from the drm_pciids.txt in the DRM CVS
-   Please contact dri-devel@lists.sf.net to add new cards to this list
-*/
 #define radeon_PCI_IDS \
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index b6d7ce92eadd67a67db12b19bb0d39208b870511..68733587e700bb00b776114e4a19530425194143 100644 (file)
@@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off {
  * A structure for mapping buffer.
  *
  * @handle: a handle to gem object created.
+ * @pad: just padding to be 64-bit aligned.
  * @size: memory size to be mapped.
  * @mapped: having user virtual address mmaped.
  *     - this variable would be filled by exynos gem module
@@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off {
  */
 struct drm_exynos_gem_mmap {
        unsigned int handle;
-       unsigned int size;
+       unsigned int pad;
+       uint64_t size;
        uint64_t mapped;
 };
 
index 7185b8f15cedfabf6dedc0e945fdd7bfb6be0b45..8760be30b3750a8f4d8e8e01692ccf92bb3b1931 100644 (file)
@@ -226,6 +226,7 @@ header-y += kdev_t.h
 header-y += kernel.h
 header-y += kernelcapi.h
 header-y += kernel-page-flags.h
+header-y += kexec.h
 header-y += keyboard.h
 header-y += keyctl.h
 header-y += l2tp.h
index 2314ad8b3c9cced6a4679441d7c6b25afe500348..b1a520ec8b59dd0975bc4e738ce2398ca19dc363 100644 (file)
@@ -140,6 +140,7 @@ struct kiocb {
                (x)->ki_dtor = NULL;                    \
                (x)->ki_obj.tsk = tsk;                  \
                (x)->ki_user_data = 0;                  \
+               (x)->private = NULL;                    \
        } while (0)
 
 #define AIO_RING_MAGIC                 0xa10a10a1
index ba43f408baa38907a8f63a64314e07bb622a6e7f..4e72a9d48232d513b5b2fe44d973de8dfeb1e9e4 100644 (file)
@@ -46,16 +46,23 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL                (1U << 0)
+#define BLK_RL_ASYNCFULL       (1U << 1)
+
 struct request_list {
+       struct request_queue    *q;     /* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+       struct blkcg_gq         *blkg;  /* blkg this request pool belongs to */
+#endif
        /*
         * count[], starved[], and wait[] are indexed by
         * BLK_RW_SYNC/BLK_RW_ASYNC
         */
-       int count[2];
-       int starved[2];
-       int elvpriv;
-       mempool_t *rq_pool;
-       wait_queue_head_t wait[2];
+       int                     count[2];
+       int                     starved[2];
+       mempool_t               *rq_pool;
+       wait_queue_head_t       wait[2];
+       unsigned int            flags;
 };
 
 /*
@@ -138,6 +145,7 @@ struct request {
        struct hd_struct *part;
        unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
+       struct request_list *rl;                /* rl this rq is alloced from */
        unsigned long long start_time_ns;
        unsigned long long io_start_time_ns;    /* when passed to hardware */
 #endif
@@ -282,11 +290,16 @@ struct request_queue {
        struct list_head        queue_head;
        struct request          *last_merge;
        struct elevator_queue   *elevator;
+       int                     nr_rqs[2];      /* # allocated [a]sync rqs */
+       int                     nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
 
        /*
-        * the queue request freelist, one for reads and one for writes
+        * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
+        * is used, root blkg allocates from @q->root_rl and all other
+        * blkgs from their own blkg->rl.  Which one to use should be
+        * determined using bio_request_list().
         */
-       struct request_list     rq;
+       struct request_list     root_rl;
 
        request_fn_proc         *request_fn;
        make_request_fn         *make_request_fn;
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq)
        return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-       if (sync)
-               return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-       return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+       unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+       return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-       if (sync)
-               queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-       else
-               queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+       unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+       rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-       if (sync)
-               queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-       else
-               queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+       unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+       rl->flags &= ~flag;
 }
 
 
@@ -827,7 +838,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
@@ -912,11 +922,15 @@ struct blk_plug {
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
+struct blk_plug_cb;
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
 struct blk_plug_cb {
        struct list_head list;
-       void (*callback)(struct blk_plug_cb *);
+       blk_plug_cb_fn callback;
+       void *data;
 };
-
+extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
+                                            void *data, int size);
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
 extern void blk_flush_plug_list(struct blk_plug *, bool);
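
The typed blk_plug_cb callback plus blk_check_plugged() lets a stacking driver batch work per plug. A hedged sketch, assuming blk_check_plugged() returns NULL when the caller holds no plug; the demo_* names and the embedded-struct layout are made up:

    #include <linux/blkdev.h>

    struct demo_plug_cb {
            struct blk_plug_cb cb;   /* placed first: the core hands back this header */
            /* driver-private batching state would follow here */
    };

    static void demo_unplug(struct blk_plug_cb *cb, bool from_schedule)
    {
            /* flush whatever was queued while the plug was held */
    }

    static void demo_submit(void *driver_data)
    {
            struct blk_plug_cb *cb = blk_check_plugged(demo_unplug, driver_data,
                                                       sizeof(struct demo_plug_cb));

            if (!cb) {
                    /* no plug in progress: submit the work immediately */
                    return;
            }
            /* otherwise queue the work; demo_unplug() runs when the plug drops */
    }
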
index 324fe08ea3b140b7b8b92f7129ad334df2e260d5..6d6795d46a7509f01e4a12993e9c7de34b02d975 100644 (file)
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
                                  unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+                                 unsigned long size,
+                                 unsigned long align,
+                                 unsigned long goal,
+                                 unsigned long limit);
 extern void *__alloc_bootmem_low(unsigned long size,
                                 unsigned long align,
                                 unsigned long goal);
index f55ab8cdc10630f75131f96324278fd56cc00e81..4d0fb3df2f4adaa584e13e963b7bc6ffa02a4e26 100644 (file)
@@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result,
 int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
                    bsg_job_fn *job_fn, int dd_job_size);
 void bsg_request_fn(struct request_queue *q);
-void bsg_remove_queue(struct request_queue *q);
 void bsg_goose_queue(struct request_queue *q);
 
 #endif
index 68d56effc32860ac29a26487316fcf75b4642261..d10b7ed595b15c07c58f30e08ca841dafb2aa0a1 100644 (file)
@@ -360,11 +360,11 @@ struct cpu_vfs_cap_data {
 
 #define CAP_WAKE_ALARM            35
 
-/* Allow preventing system suspends while epoll events are pending */
+/* Allow preventing system suspends */
 
-#define CAP_EPOLLWAKEUP      36
+#define CAP_BLOCK_SUSPEND    36
 
-#define CAP_LAST_CAP         CAP_EPOLLWAKEUP
+#define CAP_LAST_CAP         CAP_BLOCK_SUSPEND
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
index aa13392a7efbf2234add8465540869b75930a25b..d4080f309b5699d5fbaa5dbe96d38a1977be7693 100644 (file)
 struct ceph_auth_client;
 struct ceph_authorizer;
 
+struct ceph_auth_handshake {
+       struct ceph_authorizer *authorizer;
+       void *authorizer_buf;
+       size_t authorizer_buf_len;
+       void *authorizer_reply_buf;
+       size_t authorizer_reply_buf_len;
+};
+
 struct ceph_auth_client_ops {
        const char *name;
 
@@ -43,9 +51,7 @@ struct ceph_auth_client_ops {
         * the response to authenticate the service.
         */
        int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
-                                struct ceph_authorizer **a,
-                                void **buf, size_t *len,
-                                void **reply_buf, size_t *reply_len);
+                                struct ceph_auth_handshake *auth);
        int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
                                       struct ceph_authorizer *a, size_t len);
        void (*destroy_authorizer)(struct ceph_auth_client *ac,
index b8c60694b2b0977d4ecdb982d8bcaea5f0ef2673..e81ab30d4896329e29d47bea33d92e5634da6a89 100644 (file)
@@ -65,7 +65,7 @@ struct ceph_file_layout {
        __le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
        /* object -> pg layout */
-       __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+       __le32 fl_unused;       /* unused; used to be preferred primary (-1) */
        __le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
 } __attribute__ ((packed));
 
@@ -384,7 +384,7 @@ union ceph_mds_request_args {
                __le32 stripe_count;         /* ... */
                __le32 object_size;
                __le32 file_replication;
-               __le32 preferred;
+               __le32 unused;               /* used to be preferred osd */
        } __attribute__ ((packed)) open;
        struct {
                __le32 flags;
index 220ae21e819b1fb2623d19d8cf4f619862f11c42..d8615dee5808d3f55c93a38c6fdb66113f09a691 100644 (file)
@@ -46,9 +46,14 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n)
 /*
  * bounds check input.
  */
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+       return end >= *p && n <= end - *p;
+}
+
 #define ceph_decode_need(p, end, n, bad)               \
        do {                                            \
-               if (unlikely(*(p) + (n) > (end)))       \
+               if (!likely(ceph_has_room(p, end, n)))  \
                        goto bad;                       \
        } while (0)
 
@@ -167,7 +172,7 @@ static inline void ceph_encode_string(void **p, void *end,
 
 #define ceph_encode_need(p, end, n, bad)               \
        do {                                            \
-               if (unlikely(*(p) + (n) > (end)))       \
+               if (!likely(ceph_has_room(p, end, n)))  \
                        goto bad;                       \
        } while (0)
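
The point of ceph_has_room() is that the old test *p + n > end can wrap around for a huge (possibly attacker-supplied) n, letting a bogus length slip past the bounds check; comparing n against the remaining room never forms an out-of-range pointer. A standalone sketch of the same guard in a tiny decoder (has_room and decode_u32 are made-up names, with void-pointer arithmetic replaced by char casts):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static inline int has_room(void **p, void *end, size_t n)
    {
            /* never computes *p + n, so it cannot overflow */
            return end >= *p && n <= (size_t)((char *)end - (char *)*p);
    }

    static int decode_u32(void **p, void *end, uint32_t *v)
    {
            if (!has_room(p, end, sizeof(*v)))
                    return -1;                  /* truncated or bogus input */
            memcpy(v, *p, sizeof(*v));
            *p = (char *)*p + sizeof(*v);
            return 0;
    }
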
 
index 3bff047f6b0f19d1037e4e7157fc785dd213f4cc..44c87e731e9d4c5fff13cbf851694cd41356fb04 100644 (file)
@@ -25,9 +25,9 @@ struct ceph_connection_operations {
        void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
 
        /* authorize an outgoing connection */
-       int (*get_authorizer) (struct ceph_connection *con,
-                              void **buf, int *len, int *proto,
-                              void **reply_buf, int *reply_len, int force_new);
+       struct ceph_auth_handshake *(*get_authorizer) (
+                               struct ceph_connection *con,
+                              int *proto, int force_new);
        int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
        int (*invalidate_authorizer)(struct ceph_connection *con);
 
@@ -163,16 +163,8 @@ struct ceph_connection {
 
        /* connection negotiation temps */
        char in_banner[CEPH_BANNER_MAX_LEN];
-       union {
-               struct {  /* outgoing connection */
-                       struct ceph_msg_connect out_connect;
-                       struct ceph_msg_connect_reply in_reply;
-               };
-               struct {  /* incoming */
-                       struct ceph_msg_connect in_connect;
-                       struct ceph_msg_connect_reply out_reply;
-               };
-       };
+       struct ceph_msg_connect out_connect;
+       struct ceph_msg_connect_reply in_reply;
        struct ceph_entity_addr actual_peer_addr;
 
        /* message out temps */
index 7c05ac202d90650069d4713ac2e13b3be9b86024..cedfb1a8434a11a0ba0b32348a0687e0ff9e7836 100644 (file)
@@ -6,9 +6,10 @@
 #include <linux/mempool.h>
 #include <linux/rbtree.h>
 
-#include "types.h"
-#include "osdmap.h"
-#include "messenger.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
 
 /* 
  * Maximum object name size 
@@ -40,9 +41,7 @@ struct ceph_osd {
        struct list_head o_requests;
        struct list_head o_linger_requests;
        struct list_head o_osd_lru;
-       struct ceph_authorizer *o_authorizer;
-       void *o_authorizer_buf, *o_authorizer_reply_buf;
-       size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+       struct ceph_auth_handshake o_auth;
        unsigned long lru_ttl;
        int o_marked_for_keepalive;
        struct list_head o_keepalive_item;
index ba4c205cbb016a141495e2e872ee6a3cfd57253a..311ef8d6aa9efc41b89ea1e6af9b07f538a84440 100644 (file)
@@ -65,8 +65,6 @@ struct ceph_osdmap {
 #define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
 #define ceph_file_layout_object_su(l) \
        ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
-       ((__s32)le32_to_cpu((l).fl_pg_preferred))
 #define ceph_file_layout_pg_pool(l) \
        ((__s32)le32_to_cpu((l).fl_pg_pool))
 
index 81e803e90aa43ac42a48b678869333fa8c0e4f4e..acba894374a1537b4b961eee222e9c28b53845cf 100644 (file)
@@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch,
                               struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 
+extern void clockevents_config(struct clock_event_device *dev, u32 freq);
 extern void clockevents_config_and_register(struct clock_event_device *dev,
                                            u32 freq, unsigned long min_delta,
                                            unsigned long max_delta);
index e988037abd2a1afa25b9e22607295c0e915f1541..51a90b7f2d606a3dcb9bb582cd209add0c429351 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_COMPACTION_H
 #define _LINUX_COMPACTION_H
 
-#include <linux/node.h>
-
 /* Return values for compact_zone() and try_to_compact_pages() */
 /* compaction didn't start as it was not possible or direct reclaim was more suitable */
 #define COMPACT_SKIPPED                0
 /* The full zone was compacted */
 #define COMPACT_COMPLETE       3
 
-/*
- * compaction supports three modes
- *
- * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
- * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources.
- *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
- *    targets and convers them to MIGRATE_MOVABLE if possible
- * COMPACT_SYNC uses synchronous migration and scans all pageblocks
- */
-enum compact_mode {
-       COMPACT_ASYNC_MOVABLE,
-       COMPACT_ASYNC_UNMOVABLE,
-       COMPACT_SYNC,
-};
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
index 5d46217f84adfaab0dbe679a7612da7062bb72c6..4e890394ef996e709c490439be23f0c6fe24292f 100644 (file)
@@ -577,8 +577,7 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector,
                unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
-               struct iovec **ret_pointer,
-               int check_access);
+               struct iovec **ret_pointer);
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
index e5834aa24b9ec2a287026d58caf097bf438df055..6a6d7aefe12d6e61cae9e44b13aea1ed74ebedda 100644 (file)
@@ -47,9 +47,9 @@
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline                inline          __attribute__((always_inline))
-# define __inline__    __inline__      __attribute__((always_inline))
-# define __inline      __inline        __attribute__((always_inline))
+# define inline                inline          __attribute__((always_inline)) notrace
+# define __inline__    __inline__      __attribute__((always_inline)) notrace
+# define __inline      __inline        __attribute__((always_inline)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
 # define inline                inline          notrace
index 7230bb59a06fec1f09ec379027729ee472193c07..2e9b9ebbeb78927681026ddbceb0255bc9197aad 100644 (file)
@@ -177,6 +177,7 @@ extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri)       cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)   register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
index 917dc5aeb1d4de6eff77341bc55c67df68c77fb6..ebbed2ce66379bd986fbf83f11e7ae8c32bf6070 100644 (file)
@@ -277,17 +277,13 @@ static inline void put_cred(const struct cred *_cred)
  * @task: The task to query
  *
  * Access the objective credentials of a task.  The caller must hold the RCU
- * readlock or the task must be dead and unable to change its own credentials.
+ * readlock.
  *
  * The result of this function should not be passed directly to get_cred();
  * rather get_task_cred() should be used instead.
  */
-#define __task_cred(task)                                              \
-       ({                                                              \
-               const struct task_struct *__t = (task);                 \
-               rcu_dereference_check(__t->real_cred,                   \
-                                     task_is_dead(__t));               \
-       })
+#define __task_cred(task)      \
+       rcu_dereference((task)->real_cred)
 
 /**
  * get_current_cred - Get the current task's subjective credentials
index 97e435b191f411380bbc546530c725093095231c..7c4750811b966e7d865484c2cf7020199c628164 100644 (file)
@@ -151,16 +151,6 @@ struct crush_map {
        struct crush_bucket **buckets;
        struct crush_rule **rules;
 
-       /*
-        * Parent pointers to identify the parent bucket a device or
-        * bucket in the hierarchy.  If an item appears more than
-        * once, this is the _last_ time it appeared (where buckets
-        * are processed in bucket id order, from -1 on down to
-        * -max_buckets.
-        */
-       __u32 *bucket_parents;
-       __u32 *device_parents;
-
        __s32 max_buckets;
        __u32 max_rules;
        __s32 max_devices;
@@ -168,8 +158,7 @@ struct crush_map {
 
 
 /* crush.c */
-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
-extern void crush_calc_parents(struct crush_map *map);
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
 extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
 extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
 extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
@@ -177,4 +166,9 @@ extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
 extern void crush_destroy_bucket(struct crush_bucket *b);
 extern void crush_destroy(struct crush_map *map);
 
+static inline int crush_calc_tree_node(int i)
+{
+       return ((i+1) << 1)-1;
+}
+
 #endif
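
crush_calc_tree_node() maps an item's position in a tree bucket to its node number; items land on the odd node numbers of the bucket's internal binary tree. A tiny standalone check of the mapping:

    #include <stdio.h>

    static inline int crush_calc_tree_node(int i)
    {
            return ((i + 1) << 1) - 1;
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("item %d -> node %d\n", i, crush_calc_tree_node(i));
            return 0;       /* prints nodes 1, 3, 5, 7 */
    }
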
index c46b99c18bb0ca772f87c567f4ab46cec54932db..71d79f44a7d0753faeb61a3072efdbacc1aa2371 100644 (file)
 
 #include "crush.h"
 
-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
-extern int crush_do_rule(struct crush_map *map,
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
                         int ruleno,
                         int x, int *result, int result_max,
-                        int forcefeed,    /* -1 for none */
-                        __u32 *weights);
+                        const __u32 *weights);
 
 #endif
index 161d96241b1b4da3b9a0909749709a33bcca33c5..6de94151ff6f7e8646ab16d7f093cea3c079f1fa 100644 (file)
@@ -865,8 +865,6 @@ extern int (*platform_notify_remove)(struct device *dev);
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
 
-extern void wait_for_device_probe(void);
-
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
 extern int devtmpfs_delete_node(struct device *dev);
index d3fec584e8c3e93d253fce27d910b0bafdede38b..56377df391242d4639db14d9b23a8fa915db9335 100644 (file)
@@ -635,6 +635,18 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
                                                  dir, flags, NULL);
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction dir, unsigned long flags,
+       struct rio_dma_ext *rio_ext)
+{
+       return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+                                                 dir, flags, rio_ext);
+}
+#endif
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction dir)
index 2d09bfa5c2628a3e1e396350b7d9d14f1a2791c8..e0de516374da37de6a95c35e79ab7cabb899d177 100644 (file)
@@ -17,6 +17,7 @@
 #define ENOIOCTLCMD    515     /* No ioctl command */
 #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
 #define EPROBE_DEFER   517     /* Driver requests probe retry */
+#define EOPENSTALE     518     /* open found a stale dentry */
 
 /* Defined for the NFSv3 protocol */
 #define EBADHANDLE     521     /* Illegal NFS file handle */
index 91bb4f27238cf156cb3ecc3daac22d37684aebd3..3c3ef19a625a26a38944cb06afca97b872d48234 100644 (file)
@@ -34,7 +34,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-int eventfd_signal(struct eventfd_ctx *ctx, int n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
                                  __u64 *cnt);
index 6f8be328770abb3c11166755d9c954194cfac1ae..f4bb378ccf6a355bbe49e79f56019f9ef386d1d4 100644 (file)
@@ -34,7 +34,7 @@
  * re-allowed until epoll_wait is called again after consuming the wakeup
  * event(s).
  *
- * Requires CAP_EPOLLWAKEUP
+ * Requires CAP_BLOCK_SUSPEND
  */
 #define EPOLLWAKEUP (1 << 29)
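
A hedged userspace sketch of arming an epoll entry as a wakeup source; the flag is only honored when the caller has CAP_BLOCK_SUSPEND, and the fallback #define covers libc headers that predate the flag:

    #include <sys/epoll.h>

    #ifndef EPOLLWAKEUP
    #define EPOLLWAKEUP (1u << 29)
    #endif

    /* Keep the system from suspending between this event firing and the next
     * epoll_wait() call (subject to CAP_BLOCK_SUSPEND). */
    static int add_wakeup_source(int epfd, int fd)
    {
            struct epoll_event ev = {
                    .events  = EPOLLIN | EPOLLWAKEUP,
                    .data.fd = fd,
            };

            return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
    }
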
 
index 3a4cef5322dcab4d6b50b96243fa7187b2da1ebd..12291a7ee2759164026ac602ab1712b5d66faef0 100644 (file)
@@ -165,8 +165,8 @@ struct fid {
  */
 
 struct export_operations {
-       int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
-                       int connectable);
+       int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
+                       struct inode *parent);
        struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
                        int fh_len, int fh_type);
        struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
index a3229d7ab9f26eb257950119c25a788df5fd9fc5..ac3f1c605843201e8e3e4561bc84d2b910b9f5ba 100644 (file)
@@ -611,6 +611,7 @@ struct fb_deferred_io {
        struct mutex lock; /* mutex that protects the page list */
        struct list_head pagelist; /* list of touched pages */
        /* callback */
+       void (*first_io)(struct fb_info *info);
        void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
 };
 #endif
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644 (file)
index 0000000..0e4e2ee
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef _LINUX_FRONTSWAP_H
+#define _LINUX_FRONTSWAP_H
+
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+struct frontswap_ops {
+       void (*init)(unsigned);
+       int (*store)(unsigned, pgoff_t, struct page *);
+       int (*load)(unsigned, pgoff_t, struct page *);
+       void (*invalidate_page)(unsigned, pgoff_t);
+       void (*invalidate_area)(unsigned);
+};
+
+extern bool frontswap_enabled;
+extern struct frontswap_ops
+       frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_shrink(unsigned long);
+extern unsigned long frontswap_curr_pages(void);
+extern void frontswap_writethrough(bool);
+
+extern void __frontswap_init(unsigned type);
+extern int __frontswap_store(struct page *page);
+extern int __frontswap_load(struct page *page);
+extern void __frontswap_invalidate_page(unsigned, pgoff_t);
+extern void __frontswap_invalidate_area(unsigned);
+
+#ifdef CONFIG_FRONTSWAP
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       bool ret = false;
+
+       if (frontswap_enabled && sis->frontswap_map)
+               ret = test_bit(offset, sis->frontswap_map);
+       return ret;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               set_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               clear_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+       p->frontswap_map = map;
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return p->frontswap_map;
+}
+#else
+/* all inline routines become no-ops and all externs are ignored */
+
+#define frontswap_enabled (0)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       return false;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return NULL;
+}
+#endif
+
+static inline int frontswap_store(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_store(page);
+       return ret;
+}
+
+static inline int frontswap_load(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_load(page);
+       return ret;
+}
+
+static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_page(type, offset);
+}
+
+static inline void frontswap_invalidate_area(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_area(type);
+}
+
+static inline void frontswap_init(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_init(type);
+}
+
+#endif /* _LINUX_FRONTSWAP_H */
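
A hedged sketch of what backend registration looks like against this API; the demo_* backend below deliberately stores nothing (returning -1 tells the core to fall back to the real swap device), and in this version frontswap_register_ops() returns the previously registered ops by value:

    #include <linux/frontswap.h>
    #include <linux/module.h>

    static void demo_init(unsigned type) { }

    static int demo_store(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* refuse; the page goes to the swap device as usual */
    }

    static int demo_load(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* nothing was stored here */
    }

    static void demo_invalidate_page(unsigned type, pgoff_t offset) { }
    static void demo_invalidate_area(unsigned type) { }

    static struct frontswap_ops demo_ops = {
            .init            = demo_init,
            .store           = demo_store,
            .load            = demo_load,
            .invalidate_page = demo_invalidate_page,
            .invalidate_area = demo_invalidate_area,
    };

    static int __init demo_frontswap_init(void)
    {
            struct frontswap_ops old = frontswap_register_ops(&demo_ops);

            (void)old;      /* a chaining backend would remember the old ops */
            return 0;
    }
    module_init(demo_frontswap_init);
    MODULE_LICENSE("GPL");
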
index 038076b27ea467c4a4c14dd9b49a51f3eec9f24c..17fd887c798f3fff64aaf27bf82a81a2d237ee31 100644 (file)
@@ -173,6 +173,15 @@ struct inodes_stat_t {
 #define WRITE_FUA              (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA                (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 
+
+/*
+ * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+ * that indicates that they should check the contents of the iovec are
+ * valid, but not check the memory that the iovec elements
+ * point to.
+ */
+#define CHECK_IOVEC_ONLY -1
+
 #define SEL_IN         1
 #define SEL_OUT                2
 #define SEL_EX         4
@@ -793,13 +802,14 @@ struct inode {
                unsigned int __i_nlink;
        };
        dev_t                   i_rdev;
+       loff_t                  i_size;
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned short          i_bytes;
+       unsigned int            i_blkbits;
        blkcnt_t                i_blocks;
-       loff_t                  i_size;
 
 #ifdef __NEED_I_SIZE_ORDERED
        seqcount_t              i_size_seqcount;
@@ -819,9 +829,8 @@ struct inode {
                struct list_head        i_dentry;
                struct rcu_head         i_rcu;
        };
-       atomic_t                i_count;
-       unsigned int            i_blkbits;
        u64                     i_version;
+       atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
        const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
@@ -1683,6 +1692,7 @@ struct inode_operations {
        int (*removexattr) (struct dentry *, const char *);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
                      u64 len);
+       int (*update_time)(struct inode *, struct timespec *, int);
 } ____cacheline_aligned;
 
 struct seq_file;
@@ -1690,8 +1700,7 @@ struct seq_file;
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              unsigned long nr_segs, unsigned long fast_segs,
                              struct iovec *fast_pointer,
-                             struct iovec **ret_pointer,
-                             int check_access);
+                             struct iovec **ret_pointer);
 
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1842,6 +1851,13 @@ static inline void inode_inc_iversion(struct inode *inode)
        spin_unlock(&inode->i_lock);
 }
 
+enum file_time_flags {
+       S_ATIME = 1,
+       S_MTIME = 2,
+       S_CTIME = 4,
+       S_VERSION = 8,
+};
+
 extern void touch_atime(struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2453,8 +2469,6 @@ enum {
 };
 
 void dio_end_io(struct bio *bio, int error);
-void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        struct block_device *bdev, const struct iovec *iov, loff_t offset,
@@ -2469,12 +2483,11 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
                                    offset, nr_segs, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
-#else
-static inline void inode_dio_wait(struct inode *inode)
-{
-}
 #endif
 
+void inode_dio_wait(struct inode *inode);
+void inode_dio_done(struct inode *inode);
+
 extern const struct file_operations generic_ro_fops;
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
@@ -2578,7 +2591,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
 extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
 
-extern void file_update_time(struct file *file);
+extern int file_update_time(struct file *file);
 
 extern int generic_show_options(struct seq_file *m, struct dentry *root);
 extern void save_mount_options(struct super_block *sb, char *options);
index 91d0e0a34ef3185a6051d8394cab63dfb76a04cb..63d966d5c2ea7a382c2f42cc664c7804dec86f73 100644 (file)
@@ -60,7 +60,7 @@
 #define FS_EVENTS_POSS_ON_CHILD   (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
                                   FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
                                   FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
-                                  FS_DELETE)
+                                  FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
 
 #define FS_MOVE                        (FS_MOVED_FROM | FS_MOVED_TO)
 
index 8f2ab8fef929f42b81f3d9a75c564b42455dc06e..9303348965fbdbc8a8d6d706949b543d54c5609b 100644 (file)
@@ -54,6 +54,9 @@
  * 7.18
  *  - add FUSE_IOCTL_DIR flag
  *  - add FUSE_NOTIFY_DELETE
+ *
+ * 7.19
+ *  - add FUSE_FALLOCATE
  */
 
 #ifndef _LINUX_FUSE_H
@@ -85,7 +88,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 18
+#define FUSE_KERNEL_MINOR_VERSION 19
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -278,6 +281,7 @@ enum fuse_opcode {
        FUSE_POLL          = 40,
        FUSE_NOTIFY_REPLY  = 41,
        FUSE_BATCH_FORGET  = 42,
+       FUSE_FALLOCATE     = 43,
 
        /* CUSE specific operations */
        CUSE_INIT          = 4096,
@@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out {
        __u64   kh;
 };
 
+struct fuse_fallocate_in {
+       __u64   fh;
+       __u64   offset;
+       __u64   length;
+       __u32   mode;
+       __u32   padding;
+};
+
 struct fuse_in_header {
        __u32   len;
        __u32   opcode;
index 73c28dea10ae395f1a7a7f4a517f174578dde313..7a114016ac7de83cf44190ebadec99b12f924cf4 100644 (file)
@@ -110,6 +110,9 @@ extern int lockdep_genl_is_held(void);
 #define genl_dereference(p)                                    \
        rcu_dereference_protected(p, lockdep_genl_is_held())
 
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
 #endif /* __KERNEL__ */
 
 #endif /* __LINUX_GENERIC_NETLINK_H */
index f07fc2d081598ba7c2432a2306081473d3e0bbe1..2e31e8b3a190bb652bb597104139d2f2b440d347 100644 (file)
@@ -22,8 +22,8 @@
 /* Gpio pin is open source */
 #define GPIOF_OPEN_SOURCE      (1 << 3)
 
-#define GPIOF_EXPORT           (1 << 2)
-#define GPIOF_EXPORT_CHANGEABLE        (1 << 3)
+#define GPIOF_EXPORT           (1 << 4)
+#define GPIOF_EXPORT_CHANGEABLE        (1 << 5)
 #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
 #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
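
The renumbering matters because bits 2 and 3 already carry the open-drain/open-source flags, so the old export values silently implied them. A hedged sketch of a combination that is now safe (demo_claim_led and the label are made up):

    #include <linux/gpio.h>

    static int demo_claim_led(unsigned gpio)
    {
            /* request as output, driven low, and export it to sysfs;
             * this no longer aliases GPIOF_OPEN_DRAIN/GPIOF_OPEN_SOURCE */
            return gpio_request_one(gpio,
                                    GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_DIR_FIXED,
                                    "demo-led");
    }
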
 
index fd0dc30c9f154af94155b8c8c47e0a228fbd2573..cc07d2777bbe6b11a632840c5f0a867436bbeac5 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
  * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
@@ -177,7 +178,8 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
-       unsigned long                   active_bases;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC    HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_HIGH_RES
 
+extern void clock_was_set_delayed(void);
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return 0;
 }
+
+static inline void clock_was_set_delayed(void) { }
+
 #endif
 
 extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644 (file)
index 0000000..a65c864
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * i2c-mux-pinctrl platform data
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_I2C_MUX_PINCTRL_H
+#define _LINUX_I2C_MUX_PINCTRL_H
+
+/**
+ * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
+ * @parent_bus_num: Parent I2C bus number
+ * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
+ * @bus_count: Number of child busses. Also the number of elements in
+ *     @pinctrl_states
+ * @pinctrl_states: The names of the pinctrl state to select for each child bus
+ * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
+ *     accessed. If NULL, the most recently used pinctrl state will be left
+ *     selected.
+ */
+struct i2c_mux_pinctrl_platform_data {
+       int parent_bus_num;
+       int base_bus_num;
+       int bus_count;
+       const char **pinctrl_states;
+       const char *pinctrl_state_idle;
+};
+
+#endif
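
A hedged sketch of board code feeding this platform data to the mux driver; the pinctrl state names, the device id, and the assumption that the platform driver binds to the name "i2c-mux-pinctrl" are illustrative:

    #include <linux/i2c-mux-pinctrl.h>
    #include <linux/platform_device.h>

    static const char *demo_mux_states[] = { "i2c-a", "i2c-b" };

    static struct i2c_mux_pinctrl_platform_data demo_mux_pdata = {
            .parent_bus_num     = 0,        /* the mux hangs off i2c-0 */
            .base_bus_num       = 0,        /* 0: child bus numbers are dynamic */
            .bus_count          = 2,        /* matches demo_mux_states[] */
            .pinctrl_states     = demo_mux_states,
            .pinctrl_state_idle = NULL,     /* keep the last-used state selected */
    };

    /* registered from board init with platform_device_register() */
    static struct platform_device demo_mux_dev = {
            .name = "i2c-mux-pinctrl",
            .id   = 0,
            .dev  = {
                    .platform_data = &demo_mux_pdata,
            },
    };
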
index b66cb601435fa732388a3f2c79b3b1af00ebbc4b..ddfa04108baf14ade92a85d3a3f909ca9ab30589 100644 (file)
@@ -541,7 +541,7 @@ struct i2c_msg {
        __u16 flags;
 #define I2C_M_TEN              0x0010  /* this is a ten bit chip address */
 #define I2C_M_RD               0x0001  /* read data, from slave to master */
-#define I2C_M_NOSTART          0x4000  /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART          0x4000  /* if I2C_FUNC_NOSTART */
 #define I2C_M_REV_DIR_ADDR     0x2000  /* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_IGNORE_NAK       0x1000  /* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_NO_RD_ACK                0x0800  /* if I2C_FUNC_PROTOCOL_MANGLING */
@@ -554,8 +554,9 @@ struct i2c_msg {
 
 #define I2C_FUNC_I2C                   0x00000001
 #define I2C_FUNC_10BIT_ADDR            0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING     0x00000004 /* I2C_M_NOSTART etc. */
+#define I2C_FUNC_PROTOCOL_MANGLING     0x00000004 /* I2C_M_IGNORE_NAK etc. */
 #define I2C_FUNC_SMBUS_PEC             0x00000008
+#define I2C_FUNC_NOSTART               0x00000010 /* I2C_M_NOSTART */
 #define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
 #define I2C_FUNC_SMBUS_QUICK           0x00010000
 #define I2C_FUNC_SMBUS_READ_BYTE       0x00020000
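
Since I2C_M_NOSTART is now gated by its own functionality bit rather than by I2C_FUNC_PROTOCOL_MANGLING, client drivers should probe for it explicitly. A hedged sketch (demo_write_two_parts is a made-up helper):

    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int demo_write_two_parts(struct i2c_client *client,
                                    u8 *hdr, int hdr_len, u8 *payload, int len)
    {
            struct i2c_msg msgs[2] = {
                    { .addr = client->addr, .flags = 0,
                      .len = hdr_len, .buf = hdr },
                    { .addr = client->addr, .flags = I2C_M_NOSTART,
                      .len = len, .buf = payload },
            };

            /* the second message is sent without a repeated START, which only
             * works on adapters that advertise I2C_FUNC_NOSTART */
            if (!i2c_check_functionality(client->adapter, I2C_FUNC_NOSTART))
                    return -EOPNOTSUPP;

            return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
    }
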
index e4baff5f7ff403722f54b5e6dcb7f67b2926fa98..9e65eff6af3bdc5dd6e865296233c8f9a1cd2a10 100644 (file)
@@ -149,6 +149,7 @@ extern struct cred init_cred;
        .normal_prio    = MAX_PRIO-20,                                  \
        .policy         = SCHED_NORMAL,                                 \
        .cpus_allowed   = CPU_MASK_ALL,                                 \
+       .nr_cpus_allowed= NR_CPUS,                                      \
        .mm             = NULL,                                         \
        .active_mm      = &init_mm,                                     \
        .se             = {                                             \
@@ -157,7 +158,6 @@ extern struct cred init_cred;
        .rt             = {                                             \
                .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
                .time_slice     = RR_TIMESLICE,                         \
-               .nr_cpus_allowed = NR_CPUS,                             \
        },                                                              \
        .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
        INIT_PUSHABLE_TASKS(tsk)                                        \
index a81671453575d800ec3d102e1aa6a8bf517073ce..2740d080ec6b7607fa1daba2bb2f72395fe34a7f 100644 (file)
@@ -116,6 +116,7 @@ struct input_keymap_entry {
 
 /**
  * EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
  *
  * The ioctl buffer argument should be binary equivalent to
  *
index c91171599cb68825709aa12a9d79e80026bd9dfb..e68a8e53bb59acf87c2dd07259f77ce491d5ec1e 100644 (file)
@@ -142,8 +142,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 extern int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *percpu_dev_id);
-
-extern void exit_irq_thread(void);
 #else
 
 extern int __must_check
@@ -177,8 +175,6 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 {
        return request_irq(irq, handler, 0, devname, percpu_dev_id);
 }
-
-static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
index 8a297a5e794cc8e51c22351098b80a35ce43ef09..5499c92a91539afcc0987d49fe6477acad2d16e4 100644 (file)
@@ -62,6 +62,8 @@ struct ipc_namespace {
        unsigned int    mq_queues_max;   /* initialized to DFLT_QUEUESMAX */
        unsigned int    mq_msg_max;      /* initialized to DFLT_MSGMAX */
        unsigned int    mq_msgsize_max;  /* initialized to DFLT_MSGSIZEMAX */
+       unsigned int    mq_msg_default;
+       unsigned int    mq_msgsize_default;
 
        /* user_ns which owns the ipc ns */
        struct user_namespace *user_ns;
@@ -90,11 +92,41 @@ static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
 
 #ifdef CONFIG_POSIX_MQUEUE
 extern int mq_init_ns(struct ipc_namespace *ns);
-/* default values */
-#define DFLT_QUEUESMAX 256     /* max number of message queues */
-#define DFLT_MSGMAX    10      /* max number of messages in each queue */
-#define HARD_MSGMAX    (32768*sizeof(void *)/4)
-#define DFLT_MSGSIZEMAX 8192   /* max message size */
+/*
+ * POSIX Message Queue default values:
+ *
+ * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
+ * DFLT_*MAX: Default values for the maximum unprivileged limits
+ * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
+ *   an attribute to the open call and the queue must be created
+ * HARD_*: Highest value the maximums can be set to.  These are enforced
+ *   on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
+ *   suitably high)
+ *
+ * POSIX Requirements:
+ *   Per app minimum openable message queues - 8.  This does not map well
+ *     to the fact that we limit the number of queues on a per namespace
+ *     basis instead of a per app basis.  So, make the default high enough
+ *     that no given app should have a hard time opening 8 queues.
+ *   Minimum maximum for HARD_MSGMAX - 32767.  I bumped this to 65536.
+ *   Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this.  However,
+ *     we have run into a situation where running applications in the wild
+ *     require this to be at least 5MB, and preferably 10MB, so I set the
+ *     value to 16MB in hopes that this user is the worst of the bunch and
+ *     the new maximum will handle anyone else.  I may have to revisit this
+ *     in the future.
+ */
+#define MIN_QUEUESMAX                  1
+#define DFLT_QUEUESMAX               256
+#define HARD_QUEUESMAX              1024
+#define MIN_MSGMAX                     1
+#define DFLT_MSG                      10U
+#define DFLT_MSGMAX                   10
+#define HARD_MSGMAX                65536
+#define MIN_MSGSIZEMAX               128
+#define DFLT_MSGSIZE                8192U
+#define DFLT_MSGSIZEMAX                     8192
+#define HARD_MSGSIZEMAX            (16*1024*1024)
 #else
 static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
 #endif
index 61f5cec031e0345bebb8e506f9727b57069dc706..a5261e3d2e3c26f3cf645af875e4093d5adcaa77 100644 (file)
@@ -301,8 +301,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_pm_shutdown:   function called from core code on shutdown once per chip
  * @irq_print_chip:    optional to print special chip info in show_interrupts
  * @flags:             chip specific flags
- *
- * @release:           release function solely used by UML
  */
 struct irq_chip {
        const char      *name;
index 912c30a8ddb1e47cd732fbd95f281238ca601ae0..f334c7fab96762ab4131c9886df87d4d6d4dde9d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <crypto/hash.h>
 #endif
 
 #define journal_oom_retry 1
@@ -147,12 +148,24 @@ typedef struct journal_header_s
 #define JBD2_CRC32_CHKSUM   1
 #define JBD2_MD5_CHKSUM     2
 #define JBD2_SHA1_CHKSUM    3
+#define JBD2_CRC32C_CHKSUM  4
 
 #define JBD2_CRC32_CHKSUM_SIZE 4
 
 #define JBD2_CHECKSUM_BYTES (32 / sizeof(u32))
 /*
  * Commit block header for storing transactional checksums:
+ *
+ * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum*
+ * fields are used to store a checksum of the descriptor and data blocks.
+ *
+ * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum
+ * field is used to store crc32c(uuid+commit_block).  Each journal metadata
+ * block gets its own checksum, and data block checksums are stored in
+ * journal_block_tag (in the descriptor).  The other h_chksum* fields are
+ * not used.
+ *
+ * Checksum v1 and v2 are mutually exclusive features.
  */
 struct commit_header {
        __be32          h_magic;
@@ -175,13 +188,19 @@ struct commit_header {
 typedef struct journal_block_tag_s
 {
        __be32          t_blocknr;      /* The on-disk block number */
-       __be32          t_flags;        /* See below */
+       __be16          t_checksum;     /* truncated crc32c(uuid+seq+block) */
+       __be16          t_flags;        /* See below */
        __be32          t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
 #define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
 #define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
 
+/* Tail of descriptor block, for checksumming */
+struct jbd2_journal_block_tail {
+       __be32          t_checksum;     /* crc32c(uuid+descr_block) */
+};
+
 /*
  * The revoke descriptor: used on disk to describe a series of blocks to
  * be revoked from the log
@@ -192,6 +211,10 @@ typedef struct jbd2_journal_revoke_header_s
        __be32           r_count;       /* Count of bytes used in the block */
 } jbd2_journal_revoke_header_t;
 
+/* Tail of revoke block, for checksumming */
+struct jbd2_journal_revoke_tail {
+       __be32          r_checksum;     /* crc32c(uuid+revoke_block) */
+};
 
 /* Definitions for the journal tag flags word: */
 #define JBD2_FLAG_ESCAPE               1       /* on-disk block is escaped */
@@ -241,7 +264,10 @@ typedef struct journal_superblock_s
        __be32  s_max_trans_data;       /* Limit of data blocks per trans. */
 
 /* 0x0050 */
-       __u32   s_padding[44];
+       __u8    s_checksum_type;        /* checksum type */
+       __u8    s_padding2[3];
+       __u32   s_padding[42];
+       __be32  s_checksum;             /* crc32c(superblock) */
 
 /* 0x0100 */
        __u8    s_users[16*48];         /* ids of all fs'es sharing the log */
@@ -263,13 +289,15 @@ typedef struct journal_superblock_s
 #define JBD2_FEATURE_INCOMPAT_REVOKE           0x00000001
 #define JBD2_FEATURE_INCOMPAT_64BIT            0x00000002
 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT     0x00000004
+#define JBD2_FEATURE_INCOMPAT_CSUM_V2          0x00000008
 
 /* Features known to this kernel version: */
 #define JBD2_KNOWN_COMPAT_FEATURES     JBD2_FEATURE_COMPAT_CHECKSUM
 #define JBD2_KNOWN_ROCOMPAT_FEATURES   0
 #define JBD2_KNOWN_INCOMPAT_FEATURES   (JBD2_FEATURE_INCOMPAT_REVOKE | \
                                        JBD2_FEATURE_INCOMPAT_64BIT | \
-                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
+                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V2)
 
 #ifdef __KERNEL__
 
@@ -939,6 +967,12 @@ struct journal_s
         * superblock pointer here
         */
        void *j_private;
+
+       /* Reference to checksum algorithm driver via cryptoapi */
+       struct crypto_shash *j_chksum_driver;
+
+       /* Precomputed journal UUID checksum for seeding other checksums */
+       __u32 j_csum_seed;
 };
 
 /*
@@ -1268,6 +1302,25 @@ static inline int jbd_space_needed(journal_t *journal)
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
+static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
+                             const void *address, unsigned int length)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
+       } desc;
+       int err;
+
+       desc.shash.tfm = journal->j_chksum_driver;
+       desc.shash.flags = 0;
+       *(u32 *)desc.ctx = crc;
+
+       err = crypto_shash_update(&desc.shash, address, length);
+       BUG_ON(err);
+
+       return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 #define buffer_trace_init(bh)  do {} while (0)
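
jbd2_chksum() folds a buffer into a running crc32c by seeding the shash context with the previous value. A hedged sketch of how a v2-checksum user might chain it from the per-journal UUID seed; demo_block_csum is illustrative, not the fs/jbd2 code itself:

    #include <linux/buffer_head.h>
    #include <linux/jbd2.h>

    static u32 demo_block_csum(journal_t *journal, struct buffer_head *bh)
    {
            /* j_csum_seed is crc32c(uuid); chain the block contents onto it */
            return jbd2_chksum(journal, journal->j_csum_seed,
                               bh->b_data, journal->j_blocksize);
    }
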
index 6230f8556a4eeac37bcaaa83a4cc913177e09c56..6133679bc4c01ace20a0114fd50ff7c3481c7eb9 100644 (file)
@@ -12,6 +12,7 @@ enum jbd_state_bits {
        BH_State,               /* Pins most journal_head state */
        BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
        BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
+       BH_Verified,            /* Metadata block has been verified ok */
        BH_JBDPrivateStart,     /* First bit available for private use by FS */
 };
 
@@ -24,6 +25,7 @@ TAS_BUFFER_FNS(Revoked, revoked)
 BUFFER_FNS(RevokeValid, revokevalid)
 TAS_BUFFER_FNS(RevokeValid, revokevalid)
 BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Verified, verified)
 
 static inline struct buffer_head *jh2bh(struct journal_head *jh)
 {
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
new file mode 100644 (file)
index 0000000..2dcd1b3
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef _LINUX_KCMP_H
+#define _LINUX_KCMP_H
+
+/* Comparison type */
+enum kcmp_type {
+       KCMP_FILE,
+       KCMP_VM,
+       KCMP_FILES,
+       KCMP_FS,
+       KCMP_SIGHAND,
+       KCMP_IO,
+       KCMP_SYSVSEM,
+
+       KCMP_TYPES,
+};
+
+#endif /* _LINUX_KCMP_H */
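
A hedged userspace sketch of driving the new kcmp() syscall through syscall(2); there is no libc wrapper, and __NR_kcmp must be provided by the installed kernel headers:

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <linux/kcmp.h>

    /* Returns 1 if fd1 in pid1 and fd2 in pid2 refer to the same struct file,
     * 0 if they differ or the call fails. */
    static int fds_share_file(pid_t pid1, int fd1, pid_t pid2, int fd2)
    {
            return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE,
                           (unsigned long)fd1, (unsigned long)fd2) == 0;
    }
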
index ec55a3c8ba77db1ceee13a8e2cff5b014895b2e2..604382143bcfccd6772260ed56e16589800af83c 100644 (file)
@@ -35,6 +35,7 @@
 #define LLONG_MAX      ((long long)(~0ULL>>1))
 #define LLONG_MIN      (-LLONG_MAX - 1)
 #define ULLONG_MAX     (~0ULL)
+#define SIZE_MAX       (~(size_t)0)
 
 #define STACK_MAGIC    0xdeadbeef
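
SIZE_MAX makes the usual multiply-overflow guard spellable in one line; a hedged sketch of the pattern (demo_alloc_array is illustrative, not an existing helper):

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static void *demo_alloc_array(size_t n, size_t elem_size)
    {
            if (elem_size && n > SIZE_MAX / elem_size)
                    return NULL;            /* n * elem_size would wrap */
            return kmalloc(n * elem_size, GFP_KERNEL);
    }
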
 
@@ -376,7 +377,6 @@ extern enum system_states {
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
-       SYSTEM_SUSPEND_DISK,
 } system_state;
 
 #define TAINT_PROPRIETARY_MODULE       0
index 0d7d6a1b172f29fde03d030168b253197b84f479..37c5f7261142c24fa582121d2bdfd9479285f681 100644 (file)
@@ -1,8 +1,58 @@
 #ifndef LINUX_KEXEC_H
 #define LINUX_KEXEC_H
 
-#ifdef CONFIG_KEXEC
+/* kexec system call -  It loads the new kernel to boot into.
+ * kexec does not sync, or unmount filesystems so if you need
+ * that to happen you need to do that yourself.
+ */
+
 #include <linux/types.h>
+
+/* kexec flags for different usage scenarios */
+#define KEXEC_ON_CRASH         0x00000001
+#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_ARCH_MASK                0xffff0000
+
+/* These values match the ELF architecture values.
+ * Unless there is a good reason that should continue to be the case.
+ */
+#define KEXEC_ARCH_DEFAULT ( 0 << 16)
+#define KEXEC_ARCH_386     ( 3 << 16)
+#define KEXEC_ARCH_X86_64  (62 << 16)
+#define KEXEC_ARCH_PPC     (20 << 16)
+#define KEXEC_ARCH_PPC64   (21 << 16)
+#define KEXEC_ARCH_IA_64   (50 << 16)
+#define KEXEC_ARCH_ARM     (40 << 16)
+#define KEXEC_ARCH_S390    (22 << 16)
+#define KEXEC_ARCH_SH      (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+#define KEXEC_ARCH_MIPS    ( 8 << 16)
+
+/* The artificial cap on the number of segments passed to kexec_load. */
+#define KEXEC_SEGMENT_MAX 16
+
+#ifndef __KERNEL__
+/*
+ * This structure is used to hold the arguments that are used when
+ * loading  kernel binaries.
+ */
+struct kexec_segment {
+       const void *buf;
+       size_t bufsz;
+       const void *mem;
+       size_t memsz;
+};
+
+/* Load a new kernel image as described by the kexec_segment array
+ * consisting of passed number of segments at the entry-point address.
+ * The flags allow different usage types.
+ */
+extern int kexec_load(void *, size_t, struct kexec_segment *,
+               unsigned long int);
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_KEXEC
 #include <linux/list.h>
 #include <linux/linkage.h>
 #include <linux/compat.h>
@@ -67,11 +117,10 @@ typedef unsigned long kimage_entry_t;
 #define IND_DONE         0x4
 #define IND_SOURCE       0x8
 
-#define KEXEC_SEGMENT_MAX 16
 struct kexec_segment {
        void __user *buf;
        size_t bufsz;
-       unsigned long mem;      /* User space sees this as a (void *) ... */
+       unsigned long mem;
        size_t memsz;
 };
 
@@ -175,25 +224,6 @@ extern struct kimage *kexec_crash_image;
 #define kexec_flush_icache_page(page)
 #endif
 
-#define KEXEC_ON_CRASH         0x00000001
-#define KEXEC_PRESERVE_CONTEXT 0x00000002
-#define KEXEC_ARCH_MASK                0xffff0000
-
-/* These values match the ELF architecture values.
- * Unless there is a good reason that should continue to be the case.
- */
-#define KEXEC_ARCH_DEFAULT ( 0 << 16)
-#define KEXEC_ARCH_386     ( 3 << 16)
-#define KEXEC_ARCH_X86_64  (62 << 16)
-#define KEXEC_ARCH_PPC     (20 << 16)
-#define KEXEC_ARCH_PPC64   (21 << 16)
-#define KEXEC_ARCH_IA_64   (50 << 16)
-#define KEXEC_ARCH_ARM     (40 << 16)
-#define KEXEC_ARCH_S390    (22 << 16)
-#define KEXEC_ARCH_SH      (42 << 16)
-#define KEXEC_ARCH_MIPS_LE (10 << 16)
-#define KEXEC_ARCH_MIPS    ( 8 << 16)
-
 /* List of defined/legal kexec flags */
 #ifndef CONFIG_KEXEC_JUMP
 #define KEXEC_FLAGS    KEXEC_ON_CRASH
@@ -228,4 +258,5 @@ struct task_struct;
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 #endif /* CONFIG_KEXEC */
+#endif /* __KERNEL__ */
 #endif /* LINUX_KEXEC_H */
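
With the userspace half of the header exported, a loader can describe segments directly. A hedged sketch in the style of kexec-tools, reaching the syscall via syscall(2); the buffer, destination address, and entry point are placeholders that a real loader computes from the image and the reserved crash region:

    #include <linux/kexec.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long demo_load_crash_kernel(const void *image, size_t image_size,
                                       const void *phys_dest,
                                       unsigned long entry)
    {
            struct kexec_segment seg = {
                    .buf   = image,
                    .bufsz = image_size,
                    .mem   = phys_dest,     /* physical load address */
                    .memsz = image_size,    /* real loaders round this up to a page */
            };

            return syscall(__NR_kexec_load, entry, 1UL, &seg,
                           KEXEC_ON_CRASH | KEXEC_ARCH_DEFAULT);
    }
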
index 5231800770e1ea3b8cc0a9b081668b4edbdf55b8..4cd22ed627efd79205860b0efa6dde15a07897ed 100644 (file)
@@ -308,9 +308,6 @@ static inline bool key_is_instantiated(const struct key *key)
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
 #endif
-
-extern void key_replace_session_keyring(void);
-
 /*
  * the userspace interface
  */
@@ -334,7 +331,6 @@ extern void key_init(void);
 #define key_fsuid_changed(t)           do { } while(0)
 #define key_fsgid_changed(t)           do { } while(0)
 #define key_init()                     do { } while(0)
-#define key_replace_session_keyring()  do { } while(0)
 
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
index dd99c329e1616ec76af1c9e8b2d11cee08434aea..5398d5807075cd2649f99363e32c49e09c5d6a75 100644 (file)
@@ -66,40 +66,10 @@ struct subprocess_info {
        void *data;
 };
 
-/* Allocate a subprocess_info structure */
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
-                                                 char **envp, gfp_t gfp_mask);
-
-/* Set various pieces of state into the subprocess_info structure */
-void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info, struct cred *new),
-                   void (*cleanup)(struct subprocess_info *info),
-                   void *data);
-
-/* Actually execute the sub-process */
-int call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-/* Free the subprocess_info. This is only needed if you're not going
-   to call call_usermodehelper_exec */
-void call_usermodehelper_freeinfo(struct subprocess_info *info);
-
-static inline int
+extern int
 call_usermodehelper_fns(char *path, char **argv, char **envp, int wait,
                        int (*init)(struct subprocess_info *info, struct cred *new),
-                       void (*cleanup)(struct subprocess_info *), void *data)
-{
-       struct subprocess_info *info;
-       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-
-       info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-       if (info == NULL)
-               return -ENOMEM;
-
-       call_usermodehelper_setfns(info, init, cleanup, data);
-
-       return call_usermodehelper_exec(info, wait);
-}
+                       void (*cleanup)(struct subprocess_info *), void *data);
 
 static inline int
 call_usermodehelper(char *path, char **argv, char **envp, int wait)
index 35f7237ec972bef4ce1ba24cd617571a193ce806..2e7a1e032c71a91e6a9dff98a0b637d621932a25 100644 (file)
@@ -21,6 +21,7 @@
  * is passed to the kernel.
  */
 enum kmsg_dump_reason {
+       KMSG_DUMP_UNDEF,
        KMSG_DUMP_PANIC,
        KMSG_DUMP_OOPS,
        KMSG_DUMP_EMERG,
@@ -31,23 +32,42 @@ enum kmsg_dump_reason {
 
 /**
  * struct kmsg_dumper - kernel crash message dumper structure
- * @dump:      The callback which gets called on crashes. The buffer is passed
- *             as two sections, where s1 (length l1) contains the older
- *             messages and s2 (length l2) contains the newer.
  * @list:      Entry in the dumper list (private)
+ * @dump:      Call into dumping code which will retrieve the data
+ *             through the record iterator
+ * @max_reason:        filter for highest reason number that should be dumped
  * @registered:        Flag that specifies if this is already registered
  */
 struct kmsg_dumper {
-       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2);
        struct list_head list;
-       int registered;
+       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+       enum kmsg_dump_reason max_reason;
+       bool active;
+       bool registered;
+
+       /* private state of the kmsg iterator */
+       u32 cur_idx;
+       u32 next_idx;
+       u64 cur_seq;
+       u64 next_seq;
 };
 
 #ifdef CONFIG_PRINTK
 void kmsg_dump(enum kmsg_dump_reason reason);
 
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len);
+
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
+void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+
 int kmsg_dump_register(struct kmsg_dumper *dumper);
 
 int kmsg_dump_unregister(struct kmsg_dumper *dumper);
@@ -56,6 +76,33 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
 {
 }
 
+static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
+                                            bool syslog, const char *line,
+                                            size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                               const char *line, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                                       char *buf, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+}
+
+static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+}
+
 static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
 {
        return -EINVAL;
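With the record iterator added above, a dumper's ->dump() callback now pulls the log contents itself via kmsg_dump_get_buffer()/kmsg_dump_get_line() instead of being handed two raw buffer sections. A hedged sketch of a dumper written against the new interface; the buffer size and names are illustrative:

    #include <linux/kmsg_dump.h>

    static char dump_buf[4096];	/* illustrative destination buffer */

    static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
    {
    	size_t len;

    	/* Copy as much of the tail of the kernel log as fits into dump_buf. */
    	if (!kmsg_dump_get_buffer(dumper, false, dump_buf, sizeof(dump_buf), &len))
    		return;

    	/* 'len' bytes of dump_buf would be handed to a persistence backend here. */
    }

    static struct kmsg_dumper my_dumper = {
    	.dump		= my_dump,
    	.max_reason	= KMSG_DUMP_OOPS,	/* documented filter on the highest reason */
    };

    /* registered once at init time with kmsg_dump_register(&my_dumper) */
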
index c4464356b35b0af21eaafe6cbd1d2d7b4f549814..96c158a37d3e5ead53765e4bfa2280a82c79be5e 100644 (file)
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        return -EINVAL;
 }
index 87f402ccec55567330943ab774ffb12ae21c7da8..f01e5f6d1f07a4966927bb7acd5707f8f77904c8 100644 (file)
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)     name##_lock_init()
-#define br_read_lock(name)     name##_local_lock()
-#define br_read_unlock(name)   name##_local_unlock()
-#define br_write_lock(name)    name##_global_lock_online()
-#define br_write_unlock(name)  name##_global_unlock_online()
+#define br_lock_init(name)     lg_lock_init(name, #name)
+#define br_read_lock(name)     lg_local_lock(name)
+#define br_read_unlock(name)   lg_local_unlock(name)
+#define br_write_lock(name)    lg_global_lock(name)
+#define br_write_unlock(name)  lg_global_unlock(name)
 
-#define DECLARE_BRLOCK(name)   DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)    DEFINE_LGLOCK(name)
 
-
-#define lg_lock_init(name)     name##_lock_init()
-#define lg_local_lock(name)    name##_local_lock()
-#define lg_local_unlock(name)  name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu)   name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name)   name##_global_lock()
-#define lg_global_unlock(name) name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 
 #define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
 
-
-#define DECLARE_LGLOCK(name)                                           \
- extern void name##_lock_init(void);                                   \
- extern void name##_local_lock(void);                                  \
- extern void name##_local_unlock(void);                                        \
- extern void name##_local_lock_cpu(int cpu);                           \
- extern void name##_local_unlock_cpu(int cpu);                         \
- extern void name##_global_lock(void);                                 \
- extern void name##_global_unlock(void);                               \
- extern void name##_global_lock_online(void);                          \
- extern void name##_global_unlock_online(void);                                \
+struct lglock {
+       arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lock_class_key lock_key;
+       struct lockdep_map    lock_dep_map;
+#endif
+};
 
 #define DEFINE_LGLOCK(name)                                            \
-                                                                       \
- DEFINE_SPINLOCK(name##_cpu_lock);                                     \
- cpumask_t name##_cpus __read_mostly;                                  \
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
- DEFINE_LGLOCK_LOCKDEP(name);                                          \
-                                                                       \
- static int                                                            \
- name##_lg_cpu_callback(struct notifier_block *nb,                     \
-                               unsigned long action, void *hcpu)       \
- {                                                                     \
-       switch (action & ~CPU_TASKS_FROZEN) {                           \
-       case CPU_UP_PREPARE:                                            \
-               spin_lock(&name##_cpu_lock);                            \
-               cpu_set((unsigned long)hcpu, name##_cpus);              \
-               spin_unlock(&name##_cpu_lock);                          \
-               break;                                                  \
-       case CPU_UP_CANCELED: case CPU_DEAD:                            \
-               spin_lock(&name##_cpu_lock);                            \
-               cpu_clear((unsigned long)hcpu, name##_cpus);            \
-               spin_unlock(&name##_cpu_lock);                          \
-       }                                                               \
-       return NOTIFY_OK;                                               \
- }                                                                     \
- static struct notifier_block name##_lg_cpu_notifier = {               \
-       .notifier_call = name##_lg_cpu_callback,                        \
- };                                                                    \
- void name##_lock_init(void) {                                         \
-       int i;                                                          \
-       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
-       }                                                               \
-       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
-       get_online_cpus();                                              \
-       for_each_online_cpu(i)                                          \
-               cpu_set(i, name##_cpus);                                \
-       put_online_cpus();                                              \
- }                                                                     \
- EXPORT_SYMBOL(name##_lock_init);                                      \
-                                                                       \
- void name##_local_lock(void) {                                                \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock);                                     \
-                                                                       \
- void name##_local_unlock(void) {                                      \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock);                                   \
-                                                                       \
- void name##_local_lock_cpu(int cpu) {                                 \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock_cpu);                                 \
-                                                                       \
- void name##_local_unlock_cpu(int cpu) {                               \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock_cpu);                               \
-                                                                       \
- void name##_global_lock_online(void) {                                        \
-       int i;                                                          \
-       spin_lock(&name##_cpu_lock);                                    \
-       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_cpu(i, &name##_cpus) {                                 \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_lock(lock);                                   \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_lock_online);                             \
-                                                                       \
- void name##_global_unlock_online(void) {                              \
-       int i;                                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_cpu(i, &name##_cpus) {                                 \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_unlock(lock);                                 \
-       }                                                               \
-       spin_unlock(&name##_cpu_lock);                                  \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_unlock_online);                           \
-                                                                       \
- void name##_global_lock(void) {                                       \
-       int i;                                                          \
-       preempt_disable();                                              \
-       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_lock(lock);                                   \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_lock);                                    \
-                                                                       \
- void name##_global_unlock(void) {                                     \
-       int i;                                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_unlock(lock);                                 \
-       }                                                               \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_unlock);
+       DEFINE_LGLOCK_LOCKDEP(name);                                    \
+       DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)                  \
+       = __ARCH_SPIN_LOCK_UNLOCKED;                                    \
+       struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
 #endif
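After this rewrite an lglock is an ordinary structure defined by DEFINE_LGLOCK(), and the lg_* primitives take a pointer to it instead of being name-pasted per-lock functions. A hedged sketch of the new calling convention; the lock name is illustrative:

    #include <linux/lglock.h>

    DEFINE_LGLOCK(my_lglock);	/* illustrative name */
    /* lg_lock_init(&my_lglock, "my_lglock") would be called once for lockdep. */

    static void reader_side(void)
    {
    	lg_local_lock(&my_lglock);	/* cheap: only this CPU's spinlock */
    	/* ... frequent, CPU-local work ... */
    	lg_local_unlock(&my_lglock);
    }

    static void writer_side(void)
    {
    	lg_global_lock(&my_lglock);	/* expensive: takes every CPU's spinlock */
    	/* ... rare, global update ... */
    	lg_global_unlock(&my_lglock);
    }
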
index 11a966e5f829e9d9862589e393c1576780cfed48..4d24d64578c4c6f4baca418bad5b64ba5fc30555 100644 (file)
@@ -54,7 +54,7 @@ extern void   nlmclnt_done(struct nlm_host *host);
 
 extern int     nlmclnt_proc(struct nlm_host *host, int cmd,
                                        struct file_lock *fl);
-extern int     lockd_up(void);
-extern void    lockd_down(void);
+extern int     lockd_up(struct net *net);
+extern void    lockd_down(struct net *net);
 
 #endif /* LINUX_LOCKD_BIND_H */
index a6bb102351486a15a8e1bce8a2da4fbfea4037c6..19dc455b4f3dd072d0e58abdfed2de6f8ae5727c 100644 (file)
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
index 7c08052e332111a7aa14d3f4bd7f0390b2a390c9..39ed62ab5b8a38ef3aafa3729767dc7d7c7587ef 100644 (file)
@@ -26,7 +26,8 @@ typedef struct mempool_s {
 extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data);
 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-                       mempool_free_t *free_fn, void *pool_data, int nid);
+                       mempool_free_t *free_fn, void *pool_data,
+                       gfp_t gfp_mask, int nid);
 
 extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
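mempool_create_node() now takes an explicit gfp_mask for allocating the pool and its initial elements. A short sketch of building a slab-backed pool against the new signature; the cache name, object size and pool size are illustrative:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *my_cache;
    static mempool_t *my_pool;

    static int __init my_pool_init(void)
    {
    	my_cache = kmem_cache_create("my_objs", 128, 0, 0, NULL);
    	if (!my_cache)
    		return -ENOMEM;

    	/* The gfp mask for the initial fill is now passed explicitly. */
    	my_pool = mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
    				      my_cache, GFP_KERNEL, NUMA_NO_NODE);
    	return my_pool ? 0 : -ENOMEM;
    }
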
index 6e27fa99e8b978d785c196ca5acb89123058f14d..6a8f002b8ed3bb77e77b48c611611b6c708b1979 100644 (file)
@@ -64,6 +64,7 @@ enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 64,
        MLX4_MFUNC_MAX          = 80,
+       MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
        MLX4_MFUNC_MAX_EQES     = 8,
        MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
        return (major << 32) | (minor << 16) | subminor;
 }
 
+struct mlx4_phys_caps {
+       u32                     num_phys_eqs;
+};
+
 struct mlx4_caps {
        u64                     fw_ver;
        u32                     function;
@@ -499,6 +504,7 @@ struct mlx4_dev {
        unsigned long           flags;
        unsigned long           num_slaves;
        struct mlx4_caps        caps;
+       struct mlx4_phys_caps   phys_caps;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
index ce26716238c3632ba95d909bf69e1e4d4bc91da2..b36d08ce5c578dcd18e224828217ded481de54ee 100644 (file)
@@ -1392,7 +1392,7 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
        vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap(struct file *, unsigned long,
+extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
index dad95bdd06d798545cea969d9cd4b9091e8a3089..704a626d94a08adc03b32b756d2d0a1c9a1f40cf 100644 (file)
@@ -57,8 +57,18 @@ struct page {
                };
 
                union {
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+       defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
+#else
+                       /*
+                        * Keep _count separate from slub cmpxchg_double data,
+                        * as the rest of the double word is protected by
+                        * slab_lock but _count is not.
+                        */
+                       unsigned counters;
+#endif
 
                        struct {
 
index 5cdc96da9dd53bbfbd13ca198367ecd4dd45b46f..e78c0e236e9dce163fc8df904698f43c455fcf85 100644 (file)
@@ -4,7 +4,7 @@
  * SDHCI declarations specific to ST SPEAr platform
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c9fe66c58f8fc718e68b2147733a0acab750d5aa..17446d3c36027ccac99f70c097af2948390800ec 100644 (file)
@@ -98,7 +98,9 @@
 
 #define SDIO_CCCR_IF           0x07    /* bus interface controls */
 
+#define  SDIO_BUS_WIDTH_MASK   0x03    /* data bus width setting */
 #define  SDIO_BUS_WIDTH_1BIT   0x00
+#define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT   0x02
 #define  SDIO_BUS_ECSI         0x20    /* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI         0x40    /* Support continuous SPI interrupt */
index 2427706f78b4d7043b5476b310d5a203631dbf90..68c569fcbb66ff751d689c57e19f196d33b2b0f8 100644 (file)
@@ -694,7 +694,7 @@ typedef struct pglist_data {
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
+       struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
 } pg_data_t;
index 1b14d25162cb7cc56ff08960553ad89b32521ce7..d6a58065c09cacf4637ac9381623e6d0293c6b5d 100644 (file)
@@ -128,7 +128,7 @@ struct kparam_array
  * The ops can have NULL set or get functions.
  */
 #define module_param_cb(name, ops, arg, perm)                                \
-       __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0)
+       __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
 
 /**
  * <level>_param_cb - general callback for a module/cmdline parameter
@@ -192,7 +192,7 @@ struct kparam_array
                 { (void *)set, (void *)get };                          \
        __module_param_call(MODULE_PARAM_PREFIX,                        \
                            name, &__param_ops_##name, arg,             \
-                           (perm) + sizeof(__check_old_set_param(set))*0, 0)
+                           (perm) + sizeof(__check_old_set_param(set))*0, -1)
 
 /* We don't get oldget: it's often a new-style param_get_uint, etc. */
 static inline int
@@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void)
  */
 #define core_param(name, var, type, perm)                              \
        param_check_##type(name, &(var));                               \
-       __module_param_call("", name, &param_ops_##type, &var, perm, 0)
+       __module_param_call("", name, &param_ops_##type, &var, perm, -1)
 #endif /* !MODULE */
 
 /**
@@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void)
                = { len, string };                                      \
        __module_param_call(MODULE_PARAM_PREFIX, name,                  \
                            &param_ops_string,                          \
-                           .str = &__param_string_##name, perm, 0);    \
+                           .str = &__param_string_##name, perm, -1);   \
        __MODULE_PARM_TYPE(name, "string")
 
 /**
@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
        __module_param_call(MODULE_PARAM_PREFIX, name,                  \
                            &param_array_ops,                           \
                            .arr = &__param_arr_##name,                 \
-                           perm, 0);                                   \
+                           perm, -1);                                  \
        __MODULE_PARM_TYPE(name, "array of " #type)
 
 extern struct kernel_param_ops param_array_ops;
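These hunks only change the level argument the wrappers pass to __module_param_call() (0 becomes -1); users of module_param_cb() and friends are untouched. For context, a hedged sketch of a custom-ops parameter that goes through the modified macro; the parameter name and validation are illustrative:

    #include <linux/moduleparam.h>

    static int threshold = 10;	/* illustrative parameter */

    static int threshold_set(const char *val, const struct kernel_param *kp)
    {
    	/* Extra validation could be done here before the generic int parser. */
    	return param_set_int(val, kp);
    }

    static struct kernel_param_ops threshold_ops = {
    	.set = threshold_set,
    	.get = param_get_int,
    };

    module_param_cb(threshold, &threshold_ops, &threshold, 0644);
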
index 34066e65fdeb327b4da8e0fd58e4dc143d985fc4..11cc2ac67e756af2b633a8badabcc49f4ffe8cb7 100644 (file)
@@ -21,8 +21,9 @@
 #define CT_LE_W(v)     cpu_to_le16(v)
 #define CT_LE_L(v)     cpu_to_le32(v)
 
+#define MSDOS_ROOT_INO  1      /* The root inode number */
+#define MSDOS_FSINFO_INO 2     /* Used for managing the FSINFO block */
 
-#define MSDOS_ROOT_INO 1       /* == MINIX_ROOT_INO */
 #define MSDOS_DIR_BITS 5       /* log2(sizeof(struct msdos_dir_entry)) */
 
 /* directory limit */
index 69b6dbf46b5edd4700aec7343b2c589702ef118e..ed3c4e09f3d1f36ff2553bace163186a0be2784b 100644 (file)
 #define GPMI_NAND_RES_SIZE     6
 
 /* Resource names for the GPMI NAND driver. */
-#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
 #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
-#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
-#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
 #define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
-#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "gpmi-dma"
 
 /**
  * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
index cf5ea8cdcf8e3a9a325da9a6f28f6fb120ea1319..63dadc0dfb629a74f6e578a16255ffc4cdb4d56a 100644 (file)
@@ -157,6 +157,15 @@ struct mtd_info {
        unsigned int erasesize_mask;
        unsigned int writesize_mask;
 
+       /*
+        * read ops return -EUCLEAN if max number of bitflips corrected on any
+        * one region comprising an ecc step equals or exceeds this value.
+        * Settable by driver, else defaults to ecc_strength.  User can override
+        * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
+        * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+        */
+       unsigned int bitflip_threshold;
+
        // Kernel-only stuff starts here.
        const char *name;
        int index;
@@ -164,7 +173,7 @@ struct mtd_info {
        /* ECC layout structure pointer - read only! */
        struct nand_ecclayout *ecclayout;
 
-       /* max number of correctable bit errors per writesize */
+       /* max number of correctable bit errors per ecc step */
        unsigned int ecc_strength;
 
        /* Data for variable erase regions. If numeraseregions is zero,
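The new bitflip_threshold field governs when read paths return -EUCLEAN, so that return now means "data recovered, but the block is degrading" rather than a hard failure. A hedged sketch of the usual caller-side handling; names are illustrative:

    #include <linux/mtd/mtd.h>

    static int read_and_check(struct mtd_info *mtd, loff_t from,
    			  size_t len, u_char *buf)
    {
    	size_t retlen;
    	int ret = mtd_read(mtd, from, len, &retlen, buf);

    	if (ret == -EUCLEAN) {
    		/* Data is valid, but corrected bitflips hit the threshold:
    		 * callers typically mark the block for scrubbing/moving. */
    		return 0;
    	}
    	return ret;
    }
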
index 1482340d3d9f5e0a6ba5d61a020b112320618362..57977c6405292347f8026fd23f7bc4572312e345 100644 (file)
@@ -161,8 +161,6 @@ typedef enum {
  * Option constants for bizarre disfunctionality and real
  * features.
  */
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR       0x00000001
 /* Buswidth is 16 bit */
 #define NAND_BUSWIDTH_16       0x00000002
 /* Device supports partial programming without padding */
@@ -207,7 +205,6 @@ typedef enum {
        (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
 
 /* Macros to identify the above */
-#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
 #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
 #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
 #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
@@ -216,7 +213,7 @@ typedef enum {
                                        && (chip->page_shift > 9))
 
 /* Mask to zero out the chip options, which come from the id table */
-#define NAND_CHIPOPTIONS_MSK   (0x0000ffff & ~NAND_NO_AUTOINCR)
+#define NAND_CHIPOPTIONS_MSK   0x0000ffff
 
 /* Non chip related options */
 /* This option skips the bbt scan during initialization. */
@@ -363,21 +360,20 @@ struct nand_ecc_ctrl {
        int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
                        uint8_t *calc_ecc);
        int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       uint8_t *buf, int page);
+                       uint8_t *buf, int oob_required, int page);
        void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf);
+                       const uint8_t *buf, int oob_required);
        int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       uint8_t *buf, int page);
+                       uint8_t *buf, int oob_required, int page);
        int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
                        uint32_t offs, uint32_t len, uint8_t *buf);
        void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf);
+                       const uint8_t *buf, int oob_required);
        int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
                        int page);
        int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       int page, int sndcmd);
-       int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page,
-                       int sndcmd);
+                       int page);
+       int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
        int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
                        int page);
 };
@@ -459,6 +455,8 @@ struct nand_buffers {
  * @pagemask:          [INTERN] page number mask = number of (pages / chip) - 1
  * @pagebuf:           [INTERN] holds the pagenumber which is currently in
  *                     data_buf.
+ * @pagebuf_bitflips:  [INTERN] holds the bitflip count for the page which is
+ *                     currently in data_buf.
  * @subpagesize:       [INTERN] holds the subpagesize
  * @onfi_version:      [INTERN] holds the chip ONFI version (BCD encoded),
  *                     non 0 if ONFI supported.
@@ -505,7 +503,8 @@ struct nand_chip {
        int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
                        int status, int page);
        int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf, int page, int cached, int raw);
+                       const uint8_t *buf, int oob_required, int page,
+                       int cached, int raw);
 
        int chip_delay;
        unsigned int options;
@@ -519,6 +518,7 @@ struct nand_chip {
        uint64_t chipsize;
        int pagemask;
        int pagebuf;
+       unsigned int pagebuf_bitflips;
        int subpagesize;
        uint8_t cellinfo;
        int badblockpos;
@@ -654,6 +654,7 @@ struct platform_nand_ctrl {
        void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
        void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
        void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+       unsigned char (*read_byte)(struct mtd_info *mtd);
        void *priv;
 };
 
index 2d7510f389346a1987b60ded53ca3bf33ad8f603..e9ac2df079ba7517b8d5c8da00e3a234075f5063 100644 (file)
@@ -313,5 +313,8 @@ extern int kernel_sock_shutdown(struct socket *sock,
        MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
                     "-type-" __stringify(type))
 
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+       MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+                    name)
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NET_H */
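MODULE_ALIAS_NET_PF_PROTO_NAME() lets a protocol module publish an alias carrying an extra suffix beyond family and protocol number. A hypothetical one-line use; the chosen family, protocol and suffix are only for illustration:

    #include <linux/module.h>
    #include <linux/net.h>
    #include <linux/netlink.h>

    /* Adds the alias "net-pf-16-proto-16-type-example"
     * (PF_NETLINK and NETLINK_GENERIC both evaluate to 16). */
    MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-type-example");
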
index e7fd468f71268f5f2727260638980c2bf8aef315..d94cb14315196f7a35e64651bdd9465aba1c477e 100644 (file)
@@ -2795,15 +2795,15 @@ do {                                                            \
 #define netif_info(priv, type, dev, fmt, args...)              \
        netif_level(info, priv, type, dev, fmt, ##args)
 
-#if defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...)            \
-       netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define netif_dbg(priv, type, netdev, format, args...)         \
 do {                                                           \
        if (netif_msg_##type(priv))                             \
                dynamic_netdev_dbg(netdev, format, ##args);     \
 } while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...)            \
+       netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 #else
 #define netif_dbg(priv, type, dev, format, args...)                    \
 ({                                                                     \
index abb1650940d21f11babe11aaf3730e434c5f0e38..826fc580757778dd1bfa6345edc680e0406bc6db 100644 (file)
@@ -27,7 +27,12 @@ union hmark_ports {
                __u16   src;
                __u16   dst;
        } p16;
+       struct {
+               __be16  src;
+               __be16  dst;
+       } b16;
        __u32   v32;
+       __be32  b32;
 };
 
 struct xt_hmark_info {
index fbb78fb09bd25c925d65207643bf61da614167d8..f58325a1d8fbe290fb8a7eb6e4ddc060ef553f91 100644 (file)
@@ -25,6 +25,7 @@ struct nfs41_impl_id;
  */
 struct nfs_client {
        atomic_t                cl_count;
+       atomic_t                cl_mds_count;
        int                     cl_cons_state;  /* current construction state (-ve: init error) */
 #define NFS_CS_READY           0               /* ready to be used */
 #define NFS_CS_INITING         1               /* busy initialising */
index d1a7bf51c326dc7f103aae60874a667f3307b373..8aadd90b808a67b466ceb66cbc0de0b57883e96b 100644 (file)
@@ -348,6 +348,7 @@ struct nfs_openargs {
        const struct qstr *     name;
        const struct nfs_server *server;         /* Needed for ID mapping */
        const u32 *             bitmask;
+       const u32 *             open_bitmap;
        __u32                   claim;
        struct nfs4_sequence_args       seq_args;
 };
@@ -1236,6 +1237,7 @@ struct nfs_pgio_header {
        struct list_head        rpc_list;
        atomic_t                refcnt;
        struct nfs_page         *req;
+       struct nfs_writeverf    *verf;
        struct pnfs_layout_segment *lseg;
        loff_t                  io_start;
        const struct rpc_call_ops *mds_ops;
@@ -1273,6 +1275,7 @@ struct nfs_write_data {
 struct nfs_write_header {
        struct nfs_pgio_header  header;
        struct nfs_write_data   rpc_data;
+       struct nfs_writeverf    verf;
 };
 
 struct nfs_mds_commit_info {
index f85308e688fd712f039ac45a5f442240113ad0bf..e33f747b173c500d02639dfd5257de093a79fff0 100644 (file)
@@ -103,6 +103,7 @@ struct svc_export {
        struct nfsd4_fs_locations ex_fslocs;
        int                     ex_nflavors;
        struct exp_flavor_info  ex_flavors[MAX_SECINFO_LIST];
+       struct cache_detail     *cd;
 };
 
 /* an "export key" (expkey) maps a filehandlefragement to an
@@ -129,24 +130,22 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
 /*
  * Function declarations
  */
-int                    nfsd_export_init(void);
-void                   nfsd_export_shutdown(void);
-void                   nfsd_export_flush(void);
+int                    nfsd_export_init(struct net *);
+void                   nfsd_export_shutdown(struct net *);
+void                   nfsd_export_flush(struct net *);
 struct svc_export *    rqst_exp_get_by_name(struct svc_rqst *,
                                             struct path *);
 struct svc_export *    rqst_exp_parent(struct svc_rqst *,
                                        struct path *);
 struct svc_export *    rqst_find_fsidzero_export(struct svc_rqst *);
-int                    exp_rootfh(struct auth_domain *, 
+int                    exp_rootfh(struct net *, struct auth_domain *,
                                        char *path, struct knfsd_fh *, int maxsize);
 __be32                 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
 __be32                 nfserrno(int errno);
 
-extern struct cache_detail svc_export_cache;
-
 static inline void exp_put(struct svc_export *exp)
 {
-       cache_put(&exp->h, &svc_export_cache);
+       cache_put(&exp->h, exp->cd);
 }
 
 static inline void exp_get(struct svc_export *exp)
index a6ee9aa898bb7122e682ade64172fc568d98abd6..a7b4fc386e634964b520d84709b82f2e59b7a065 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller platform data header file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f32578634d9d1a9c195c8075c80bc981607d3452..45db49f64bb492ddf34d2b451fffa810ea45b965 100644 (file)
@@ -555,6 +555,8 @@ enum perf_event_type {
        PERF_RECORD_MAX,                        /* non-ABI */
 };
 
+#define PERF_MAX_STACK_DEPTH           127
+
 enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
@@ -609,8 +611,6 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <asm/local.h>
 
-#define PERF_MAX_STACK_DEPTH           255
-
 struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
index 4f75e531c112c176b7a29146c6581e857442dba2..241065c9ce51832962f0fce4c93696d4b09dcb9d 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/power_supply.h>
 
 enum data_source {
+       CM_BATTERY_PRESENT,
+       CM_NO_BATTERY,
        CM_FUEL_GAUGE,
        CM_CHARGER_STAT,
 };
@@ -29,6 +31,16 @@ enum polling_modes {
        CM_POLL_CHARGING_ONLY,
 };
 
+enum cm_event_types {
+       CM_EVENT_UNKNOWN = 0,
+       CM_EVENT_BATT_FULL,
+       CM_EVENT_BATT_IN,
+       CM_EVENT_BATT_OUT,
+       CM_EVENT_EXT_PWR_IN_OUT,
+       CM_EVENT_CHG_START_STOP,
+       CM_EVENT_OTHERS,
+};
+
 /**
  * struct charger_global_desc
  * @rtc_name: the name of RTC used to wake up the system from suspend.
@@ -38,11 +50,18 @@ enum polling_modes {
  *     rtc_only_wakeup() returning false.
  *     If the RTC given to CM is the only wakeup reason,
  *     rtc_only_wakeup should return true.
+ * @assume_timer_stops_in_suspend:
+ *     Assume that the jiffy timer stops in suspend-to-RAM.
+ *     When enabled, CM does not rely on jiffies value in
+ *     suspend_again and assumes that jiffies value does not
+ *     change during suspend.
  */
 struct charger_global_desc {
        char *rtc_name;
 
        bool (*rtc_only_wakeup)(void);
+
+       bool assume_timer_stops_in_suspend;
 };
 
 /**
@@ -50,6 +69,11 @@ struct charger_global_desc {
  * @psy_name: the name of power-supply-class for charger manager
  * @polling_mode:
  *     Determine which polling mode will be used
+ * @fullbatt_vchkdrop_ms:
+ * @fullbatt_vchkdrop_uV:
+ *     Check voltage drop after the battery is fully charged.
+ *     If it has dropped more than fullbatt_vchkdrop_uV after
+ *     fullbatt_vchkdrop_ms, CM will restart charging.
  * @fullbatt_uV: voltage in microvolt
  *     If it is not being charged and VBATT >= fullbatt_uV,
  *     it is assumed to be full.
@@ -76,6 +100,8 @@ struct charger_desc {
        enum polling_modes polling_mode;
        unsigned int polling_interval_ms;
 
+       unsigned int fullbatt_vchkdrop_ms;
+       unsigned int fullbatt_vchkdrop_uV;
        unsigned int fullbatt_uV;
 
        enum data_source battery_present;
@@ -101,6 +127,11 @@ struct charger_desc {
  * @fuel_gauge: power_supply for fuel gauge
  * @charger_stat: array of power_supply for chargers
  * @charger_enabled: the state of charger
+ * @fullbatt_vchk_jiffies_at:
+ *     jiffies at the time full battery check will occur.
+ * @fullbatt_vchk_uV: voltage in microvolt
+ *     criteria for full battery
+ * @fullbatt_vchk_work: work queue for full battery check
  * @emergency_stop:
  *     When setting true, stop charging
  * @last_temp_mC: the measured temperature in milli-Celsius
@@ -121,6 +152,10 @@ struct charger_manager {
 
        bool charger_enabled;
 
+       unsigned long fullbatt_vchk_jiffies_at;
+       unsigned int fullbatt_vchk_uV;
+       struct delayed_work fullbatt_vchk_work;
+
        int emergency_stop;
        int last_temp_mC;
 
@@ -134,14 +169,13 @@ struct charger_manager {
 #ifdef CONFIG_CHARGER_MANAGER
 extern int setup_charger_manager(struct charger_global_desc *gd);
 extern bool cm_suspend_again(void);
+extern void cm_notify_event(struct power_supply *psy,
+                               enum cm_event_types type, char *msg);
 #else
-static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd)
-{ }
-
-static bool __maybe_unused cm_suspend_again(void)
-{
-       return false;
-}
+static inline int setup_charger_manager(struct charger_global_desc *gd)
+{ return 0; }
+static inline bool cm_suspend_again(void) { return false; }
+static inline void cm_notify_event(struct power_supply *psy,
+                               enum cm_event_types type, char *msg) { }
 #endif
-
 #endif /* _CHARGER_MANAGER_H */
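cm_notify_event() is the new entry point through which a charger or fuel-gauge driver reports battery and external-power events to charger-manager instead of relying on polling alone. A minimal sketch of such a notification; the surrounding function and message text are illustrative:

    #include <linux/power/charger-manager.h>

    /* Called from an illustrative charger driver's cable-detect path. */
    static void report_cable_change(struct power_supply *psy, int attached)
    {
    	cm_notify_event(psy, CM_EVENT_EXT_PWR_IN_OUT,
    			attached ? "cable attached" : "cable detached");
    }
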
index e01b167e66f068223f86321109b77687ce5ef50c..89dd84f47c6ed6041cde8b9259c8e737072a6ffa 100644 (file)
@@ -116,6 +116,18 @@ enum max17042_register {
        MAX17042_VFSOC          = 0xFF,
 };
 
+/* Registers specific to max17047/50 */
+enum max17047_register {
+       MAX17047_QRTbl00        = 0x12,
+       MAX17047_FullSOCThr     = 0x13,
+       MAX17047_QRTbl10        = 0x22,
+       MAX17047_QRTbl20        = 0x32,
+       MAX17047_V_empty        = 0x3A,
+       MAX17047_QRTbl30        = 0x42,
+};
+
+enum max170xx_chip_type {MAX17042, MAX17047};
+
 /*
  * used for setting a register to a desired value
  * addr : address for a register
@@ -144,6 +156,7 @@ struct max17042_config_data {
        u16     shdntimer;      /* 0x03F */
 
        /* App data */
+       u16     full_soc_thresh;        /* 0x13 */
        u16     design_cap;     /* 0x18 */
        u16     ichgt_term;     /* 0x1E */
 
@@ -162,6 +175,10 @@ struct max17042_config_data {
        u16     lavg_empty;     /* 0x36 */
        u16     dqacc;          /* 0x45 */
        u16     dpacc;          /* 0x46 */
+       u16     qrtbl00;        /* 0x12 */
+       u16     qrtbl10;        /* 0x22 */
+       u16     qrtbl20;        /* 0x32 */
+       u16     qrtbl30;        /* 0x42 */
 
        /* Cell technology from power_supply.h */
        u16     cell_technology;
index c38c13db8832e7b3c15440807e7e7719a5603792..3b912bee28d1693b8c6617f637354ed2869d306f 100644 (file)
@@ -96,6 +96,7 @@ enum power_supply_property {
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_VOLTAGE_AVG,
+       POWER_SUPPLY_PROP_VOLTAGE_OCV,
        POWER_SUPPLY_PROP_CURRENT_MAX,
        POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -211,7 +212,7 @@ extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
 
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+#ifdef CONFIG_POWER_SUPPLY
 extern int power_supply_is_system_supplied(void);
 #else
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
@@ -261,6 +262,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
        case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+       case POWER_SUPPLY_PROP_VOLTAGE_OCV:
        case POWER_SUPPLY_PROP_POWER_NOW:
                return 1;
        default:
index 78b76e24cc7eed661d7696200c4c4f2291425a0d..289760f424aaa3247d2e4f66841334c67295c9ef 100644 (file)
 # define PR_SET_MM_START_STACK         5
 # define PR_SET_MM_START_BRK           6
 # define PR_SET_MM_BRK                 7
+# define PR_SET_MM_ARG_START           8
+# define PR_SET_MM_ARG_END             9
+# define PR_SET_MM_ENV_START           10
+# define PR_SET_MM_ENV_END             11
+# define PR_SET_MM_AUXV                        12
+# define PR_SET_MM_EXE_FILE            13
 
 /*
  * Set specific pid that is allowed to ptrace the current task.
 #define PR_SET_PTRACER 0x59616d61
 # define PR_SET_PTRACER_ANY ((unsigned long)-1)
 
-#define PR_SET_CHILD_SUBREAPER 36
-#define PR_GET_CHILD_SUBREAPER 37
+#define PR_SET_CHILD_SUBREAPER 36
+#define PR_GET_CHILD_SUBREAPER 37
 
 /*
  * If no_new_privs is set, then operations that grant new privileges (i.e.
  * Changing LSM security domain is considered a new privilege.  So, for example,
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
  */
-#define PR_SET_NO_NEW_PRIVS 38
-#define PR_GET_NO_NEW_PRIVS 39
+#define PR_SET_NO_NEW_PRIVS    38
+#define PR_GET_NO_NEW_PRIVS    39
+
+#define PR_GET_TID_ADDRESS     40
 
 #endif /* _LINUX_PRCTL_H */
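PR_SET_NO_NEW_PRIVS is driven from userspace via prctl(2); once set it cannot be cleared, and execve() may no longer grant privileges (setuid bits, file capabilities, new LSM domains), as described in the referenced Documentation/prctl/no_new_privs.txt. A minimal userspace sketch:

    #include <sys/prctl.h>
    #include <stdio.h>

    #ifndef PR_SET_NO_NEW_PRIVS
    #define PR_SET_NO_NEW_PRIVS 38	/* value from the hunk above, for older headers */
    #endif

    int main(void)
    {
    	/* Irrevocably forbid execve() from granting new privileges. */
    	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
    		perror("prctl");
    	return 0;
    }
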
index 7ed7fd4dba49629fe6bd3ed2bef29b244bd7c6d9..3b823d49a85a7fa87f5d8cae9dbde5c4067bb103 100644 (file)
@@ -69,12 +69,14 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                                                       size_t size,
                                                       bool ecc);
 void persistent_ram_free(struct persistent_ram_zone *prz);
+void persistent_ram_zap(struct persistent_ram_zone *prz);
 struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
                bool ecc);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
        unsigned int count);
 
+void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
 void *persistent_ram_old(struct persistent_ram_zone *prz);
 void persistent_ram_free_old(struct persistent_ram_zone *prz);
index 44835fb39793b3263eb79b17e21575fccb12c921..f36632061c668d0b9e7fc8def05d34650a5a55c8 100644 (file)
@@ -160,7 +160,9 @@ enum pxa_ssp_type {
        PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
        PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
        PXA27x_SSP,
+       PXA3xx_SSP,
        PXA168_SSP,
+       PXA910_SSP,
        CE4100_SSP,
 };
 
index 0d04cd69ab9b8c483895fbf7bab7cd691508e77a..ffc444c38b0ab64ab999da3d670dde338e669265 100644 (file)
@@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
                        iter->index++;
                        if (likely(*slot))
                                return slot;
-                       if (flags & RADIX_TREE_ITER_CONTIG)
+                       if (flags & RADIX_TREE_ITER_CONTIG) {
+                               /* forbid switching to the next chunk */
+                               iter->next_index = 0;
                                break;
+                       }
                }
        }
        return NULL;
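The fix above zeroes iter->next_index when a contiguous scan hits a hole, so radix_tree_for_each_contig() stops instead of spilling into the next chunk. A hedged sketch of the iteration this protects; the tree and the processing step are illustrative, and callers normally hold the tree lock or rcu_read_lock():

    #include <linux/radix-tree.h>

    static void walk_contig(struct radix_tree_root *root, unsigned long start)
    {
    	struct radix_tree_iter iter;
    	void **slot;

    	/* Visits consecutive indices from 'start', stopping at the first hole. */
    	radix_tree_for_each_contig(slot, root, &iter, start) {
    		void *item = radix_tree_deref_slot(slot);

    		/* ... process 'item' ... */
    		(void)item;
    	}
    }
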
index 26d1a47591f1534306160811967248ed9ecfcc12..9cac722b169c9a1f5dbcae489d7b4c43a4fbe1a2 100644 (file)
@@ -184,7 +184,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
index adb5e5a38cae96cfcd66c98e489146e5f3ca8be2..4e56a9c69a356ca73fcd06cf775f337276f77f2e 100644 (file)
@@ -87,17 +87,24 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline int rcu_needs_cpu(int cpu)
+static inline void rcu_preempt_note_context_switch(void)
 {
+}
+
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+       *delta_jiffies = ULONG_MAX;
        return 0;
 }
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
+void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
-static inline int rcu_needs_cpu(int cpu)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+       *delta_jiffies = ULONG_MAX;
        return rcu_preempt_needs_cpu();
 }
 
@@ -106,6 +113,7 @@ static inline int rcu_needs_cpu(int cpu)
 static inline void rcu_note_context_switch(int cpu)
 {
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch();
 }
 
 /*
index 3c6083cde4fc2d33914012483929215fcf2ce23b..952b793393045d63b5f0f78c9f459ba4ad81a2f4 100644 (file)
@@ -32,7 +32,7 @@
 
 extern void rcu_init(void);
 extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
 extern void rcu_cpu_stall_reset(void);
 
 /*
index 4d50611112ba118e69df87888a81f764aee4c346..a90ebadd9da055bb5130782246872a0ef53d8438 100644 (file)
@@ -20,6 +20,9 @@
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/rio_regs.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
 
 #define RIO_NO_HOPCOUNT                -1
 #define RIO_INVALID_DESTID     0xffff
@@ -254,6 +257,9 @@ struct rio_mport {
        u32 phys_efptr;
        unsigned char name[40];
        void *priv;             /* Master port private data */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       struct dma_device       dma;
+#endif
 };
 
 /**
@@ -395,6 +401,47 @@ union rio_pw_msg {
        u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/**
+ * enum rio_write_type - RIO write transaction types used in DMA transfers
+ *
+ * Note: RapidIO specification defines write (NWRITE) and
+ * write-with-response (NWRITE_R) data transfer operations.
+ * Existing DMA controllers that service RapidIO may use one of these operations
+ * for the entire data transfer, or combine them so that only the last data
+ * packet requires a response.
+ */
+enum rio_write_type {
+       RDW_DEFAULT,            /* default method used by DMA driver */
+       RDW_ALL_NWRITE,         /* all packets use NWRITE */
+       RDW_ALL_NWRITE_R,       /* all packets use NWRITE_R */
+       RDW_LAST_NWRITE_R,      /* last packet uses NWRITE_R, others - NWRITE */
+};
+
+struct rio_dma_ext {
+       u16 destid;
+       u64 rio_addr;   /* low 64-bits of 66-bit RapidIO address */
+       u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+       enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+struct rio_dma_data {
+       /* Local data (as scatterlist) */
+       struct scatterlist      *sg;    /* I/O scatter list */
+       unsigned int            sg_len; /* size of scatter list */
+       /* Remote device address (flat buffer) */
+       u64 rio_addr;   /* low 64-bits of 66-bit RapidIO address */
+       u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+       enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
+{
+       return container_of(ddev, struct rio_mport, dma);
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
index 7f07470e1ed9443e20cbc1cafe0d696e04bdb8c6..31ad146be3168bd127bdd083a0288f940f1b154b 100644 (file)
@@ -377,6 +377,15 @@ void rio_unregister_driver(struct rio_driver *);
 struct rio_dev *rio_dev_get(struct rio_dev *);
 void rio_dev_put(struct rio_dev *);
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
+extern void rio_release_dma(struct dma_chan *dchan);
+extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
+               struct rio_dev *rdev, struct dma_chan *dchan,
+               struct rio_dma_data *data,
+               enum dma_transfer_direction direction, unsigned long flags);
+#endif
+
 /**
  * rio_name - Get the unique RIO device identifier
  * @rdev: RIO device
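Together with the rio.h additions earlier in this diff, these declarations give RapidIO drivers a dmaengine-based transfer path: request a channel bound to the device's mport, describe the transfer in a struct rio_dma_data, and submit the resulting descriptor. A hedged sketch of that flow; scatterlist preparation and error handling are abbreviated and the address values are illustrative:

    #include <linux/rio.h>
    #include <linux/rio_drv.h>
    #include <linux/dmaengine.h>

    static int start_rio_write(struct rio_dev *rdev, struct scatterlist *sg,
    			   unsigned int sg_len, u64 rio_addr)
    {
    	struct dma_async_tx_descriptor *tx;
    	struct dma_chan *chan;
    	struct rio_dma_data data = {
    		.sg		= sg,
    		.sg_len		= sg_len,
    		.rio_addr	= rio_addr,		/* target-side address */
    		.rio_addr_u	= 0,
    		.wr_type	= RDW_LAST_NWRITE_R,	/* response on last packet only */
    	};

    	chan = rio_request_dma(rdev);
    	if (!chan)
    		return -ENODEV;

    	tx = rio_dma_prep_slave_sg(rdev, chan, &data, DMA_MEM_TO_DEV, 0);
    	if (!tx) {
    		rio_release_dma(chan);
    		return -EIO;
    	}

    	dmaengine_submit(tx);
    	dma_async_issue_pending(chan);
    	return 0;
    }
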
index a8e50e44203c868d1957ba6170e9074569f0235e..82a673905edb12ee0ad6e466423439a31b407c49 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
 
 /* The feature bitmap for virtio rpmsg */
 #define VIRTIO_RPMSG_F_NS      0 /* RP supports name service notifications */
@@ -120,7 +122,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
 /**
  * struct rpmsg_endpoint - binds a local rpmsg address to its user
  * @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
  * @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
  * @addr: local rpmsg address
  * @priv: private data for the driver's use
  *
@@ -140,7 +144,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
  */
 struct rpmsg_endpoint {
        struct rpmsg_channel *rpdev;
+       struct kref refcount;
        rpmsg_rx_cb_t cb;
+       struct mutex cb_lock;
        u32 addr;
        void *priv;
 };
index f45c0b280b5d39873aaca3a3d67b1a01362adba8..4a1f493e0feff18485fc7ccff11da4e7eb12dde7 100644 (file)
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm);
                                        /* leave room for more dump flags */
 #define MMF_VM_MERGEABLE       16      /* KSM may merge identical pages */
 #define MMF_VM_HUGEPAGE                17      /* set when VM_HUGEPAGE is set on vma */
+#define MMF_EXE_FILE_CHANGED   18      /* see prctl_set_mm_exe_file() */
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -875,6 +877,8 @@ struct sched_group_power {
         * Number of busy cpus in this group.
         */
        atomic_t nr_busy_cpus;
+
+       unsigned long cpumask[0]; /* iteration mask */
 };
 
 struct sched_group {
@@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
        return to_cpumask(sg->cpumask);
 }
 
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+       return to_cpumask(sg->sgp->cpumask);
+}
+
 /**
  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
  * @group: The group whose first cpu is to be returned.
@@ -1187,7 +1200,6 @@ struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned int time_slice;
-       int nr_cpus_allowed;
 
        struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,6 +1264,7 @@ struct task_struct {
 #endif
 
        unsigned int policy;
+       int nr_cpus_allowed;
        cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1301,11 +1314,6 @@ struct task_struct {
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-       /* IRQ handler threads */
-       unsigned irq_thread:1;
-#endif
-
        pid_t pid;
        pid_t tgid;
 
@@ -1313,10 +1321,9 @@ struct task_struct {
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
 #endif
-
-       /* 
+       /*
         * pointers to (original) parent process, youngest child, younger sibling,
-        * older sibling, respectively.  (p->father can be replaced with 
+        * older sibling, respectively.  (p->father can be replaced with
         * p->real_parent->pid)
         */
        struct task_struct __rcu *real_parent; /* real parent process */
@@ -1363,8 +1370,6 @@ struct task_struct {
                                         * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
                                       it with task_lock())
@@ -1400,6 +1405,8 @@ struct task_struct {
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;
+       struct hlist_head task_works;
+
        struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
        uid_t loginuid;
@@ -1864,22 +1871,12 @@ static inline void rcu_copy_process(struct task_struct *p)
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-       if (prev->rcu_read_lock_nesting != 0)
-               rcu_preempt_note_context_switch();
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-}
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -1902,6 +1899,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
@@ -2213,6 +2218,20 @@ extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
+static inline void restore_saved_sigmask(void)
+{
+       if (test_and_clear_restore_sigmask())
+               __set_current_blocked(&current->saved_sigmask);
+}
+
+static inline sigset_t *sigmask_to_save(void)
+{
+       sigset_t *res = &current->blocked;
+       if (unlikely(test_restore_sigmask()))
+               res = &current->saved_sigmask;
+       return res;
+}
+
 static inline int kill_cad_pid(int sig, int priv)
 {
        return kill_pid(cad_pid, sig, priv);
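restore_saved_sigmask() and sigmask_to_save() above centralize the TIF_RESTORE_SIGMASK handling that each architecture's signal code used to open-code. A hedged sketch of the intended calling pattern, with my_handle_signal(), my_setup_rt_frame() and my_no_signal_path() standing in for the architecture-specific routines (signal_delivered() is the helper declared in the signal.h hunk later in this diff):

static void my_handle_signal(int sig, struct k_sigaction *ka,
			     siginfo_t *info, struct pt_regs *regs)
{
	/* mask to restore at sigreturn: ->blocked, or the mask saved
	 * before a sigsuspend/ppoll/pselect style syscall */
	sigset_t *oldset = sigmask_to_save();

	if (my_setup_rt_frame(sig, ka, info, oldset, regs))
		return;					/* frame setup failed */
	signal_delivered(sig, info, ka, regs, 0);	/* 0: not single-stepping */
}

static void my_no_signal_path(void)
{
	/* returning to user space without delivering a signal: put back
	 * any mask stashed in current->saved_sigmask */
	restore_saved_sigmask();
}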
index ab0e091ce5facf0047c57191f9e631fd5c4bb791..4e5a73cdbbef18463920022626931d02c0540eb9 100644 (file)
@@ -86,9 +86,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
 extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
 extern int cap_inode_need_killpriv(struct dentry *dentry);
 extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_file_mmap(struct file *file, unsigned long reqprot,
-                        unsigned long prot, unsigned long flags,
-                        unsigned long addr, unsigned long addr_only);
+extern int cap_mmap_addr(unsigned long addr);
+extern int cap_mmap_file(struct file *file, unsigned long reqprot,
+                        unsigned long prot, unsigned long flags);
 extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
 extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5);
@@ -586,15 +586,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     simple integer value.  When @arg represents a user space pointer, it
  *     should never be used by the security module.
  *     Return 0 if permission is granted.
- * @file_mmap :
+ * @mmap_addr :
+ *     Check permissions for a mmap operation at @addr.
+ *     @addr contains virtual address that will be used for the operation.
+ *     Return 0 if permission is granted.
+ * @mmap_file :
  *     Check permissions for a mmap operation.  The @file may be NULL, e.g.
  *     if mapping anonymous memory.
  *     @file contains the file structure for file to map (may be NULL).
  *     @reqprot contains the protection requested by the application.
  *     @prot contains the protection that will be applied by the kernel.
  *     @flags contains the operational flags.
- *     @addr contains virtual address that will be used for the operation.
- *     @addr_only contains a boolean: 0 if file-backed VMA, otherwise 1.
  *     Return 0 if permission is granted.
  * @file_mprotect:
  *     Check permissions before changing memory access permissions.
@@ -1481,10 +1483,10 @@ struct security_operations {
        void (*file_free_security) (struct file *file);
        int (*file_ioctl) (struct file *file, unsigned int cmd,
                           unsigned long arg);
-       int (*file_mmap) (struct file *file,
+       int (*mmap_addr) (unsigned long addr);
+       int (*mmap_file) (struct file *file,
                          unsigned long reqprot, unsigned long prot,
-                         unsigned long flags, unsigned long addr,
-                         unsigned long addr_only);
+                         unsigned long flags);
        int (*file_mprotect) (struct vm_area_struct *vma,
                              unsigned long reqprot,
                              unsigned long prot);
@@ -1743,9 +1745,9 @@ int security_file_permission(struct file *file, int mask);
 int security_file_alloc(struct file *file);
 void security_file_free(struct file *file);
 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_mmap(struct file *file, unsigned long reqprot,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long addr, unsigned long addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+                       unsigned long flags);
+int security_mmap_addr(unsigned long addr);
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
                           unsigned long prot);
 int security_file_lock(struct file *file, unsigned int cmd);
@@ -2181,13 +2183,15 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
        return 0;
 }
 
-static inline int security_file_mmap(struct file *file, unsigned long reqprot,
-                                    unsigned long prot,
-                                    unsigned long flags,
-                                    unsigned long addr,
-                                    unsigned long addr_only)
+static inline int security_mmap_file(struct file *file, unsigned long prot,
+                                    unsigned long flags)
+{
+       return 0;
+}
+
+static inline int security_mmap_addr(unsigned long addr)
 {
-       return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
+       return cap_mmap_addr(addr);
 }
 
 static inline int security_file_mprotect(struct vm_area_struct *vma,
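The old file_mmap hook is split above into an address check and a file check. A simplified sketch of how a caller would now use the pair; the real do_mmap()/mmap_region() paths take more arguments and this helper is invented for illustration:

static int my_mmap_security_checks(struct file *file, unsigned long addr,
				   unsigned long prot, unsigned long flags)
{
	int err;

	err = security_mmap_addr(addr);		/* address-only policy, e.g. low-memory maps */
	if (err)
		return err;

	/* @file may be NULL for anonymous mappings, as documented above */
	return security_mmap_file(file, prot, flags);
}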
index 17046cc484bced6426c36b81f43ce43fdf0ffde4..26b424adc84299b6a53c9ad986e4d9350d16b68a 100644 (file)
@@ -250,12 +250,13 @@ extern long do_sigpending(void __user *, unsigned long);
 extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
                                const struct timespec *);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
-extern void set_current_blocked(const sigset_t *);
+extern void set_current_blocked(sigset_t *);
+extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
-extern void block_sigmask(struct k_sigaction *ka, int signr);
+extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 
 extern struct kmem_cache *sighand_cachep;
index 0e501714d47fa1a885b79079d7eb82bbbb71d831..642cb7355df3ac82fff918f9ac02feff33bec0f2 100644 (file)
@@ -225,14 +225,11 @@ enum {
        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,
 
-       /* ensure the originating sk reference is available on driver level */
-       SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
-
        /* device driver supports TX zero-copy buffers */
-       SKBTX_DEV_ZEROCOPY = 1 << 4,
+       SKBTX_DEV_ZEROCOPY = 1 << 3,
 
        /* generate wifi status information (where possible) */
-       SKBTX_WIFI_STATUS = 1 << 5,
+       SKBTX_WIFI_STATUS = 1 << 4,
 };
 
 /*
@@ -1896,8 +1893,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 {
        int delta = 0;
 
-       if (headroom < NET_SKB_PAD)
-               headroom = NET_SKB_PAD;
        if (headroom > skb_headroom(skb))
                delta = headroom - skb_headroom(skb);
 
index a595dce6b0c7596d1481e2c87a2b55028c66a449..67d5d94b783a4b4ba97b53fc9d0adf9fd885af34 100644 (file)
@@ -242,7 +242,7 @@ size_t ksize(const void *);
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
-       if (size != 0 && n > ULONG_MAX / size)
+       if (size != 0 && n > SIZE_MAX / size)
                return NULL;
        return __kmalloc(n * size, flags);
 }
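The overflow guard now compares n against SIZE_MAX / size, matching the size_t types of both arguments. Callers are unchanged; an overflowing request just looks like an allocation failure. A small hypothetical example (struct my_entry is a placeholder):

static int my_alloc_entries(struct my_entry **table, size_t count)
{
	/* NULL on real OOM and also when count * sizeof(**table)
	 * would overflow size_t */
	*table = kmalloc_array(count, sizeof(**table), GFP_KERNEL);
	return *table ? 0 : -ENOMEM;
}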
index d3e1075f7b6031b3d1e56e045bb91157e14bb5b0..c73d1445c77ecfc13a4867f60444f6c3a1adc183 100644 (file)
@@ -43,7 +43,7 @@ struct pxa2xx_spi_chip {
        void (*cs_control)(u32 command);
 };
 
-#ifdef CONFIG_ARCH_PXA
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
 #include <mach/dma.h>
index 26e5b613deda9e63816b58c59f53f06d7dccd50c..09a545a7dfa39bcd7f736f446358d4c3b112aae4 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
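With nr_pages_max recording the size of pages[]/partial[] and splice_shrink_spd() no longer taking the pipe, callers follow a slightly revised pattern. A rough sketch, with the page-filling step elided and splice_to_pipe() standing in for whatever actor the caller runs:

static ssize_t my_fill_and_splice(struct pipe_inode_info *pipe,
				  struct splice_pipe_desc *spd)
{
	ssize_t ret;

	if (splice_grow_spd(pipe, spd))		/* may enlarge the spd arrays */
		return -ENOMEM;

	/* ... fill spd->pages[]/spd->partial[] and set spd->nr_pages,
	 *     staying within spd->nr_pages_max ... */

	ret = splice_to_pipe(pipe, spd);

	splice_shrink_spd(spd);			/* note: no pipe argument any more */
	return ret;
}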
index 51b29ac45a8e7b26583df0217ab37a0d939ad6da..40e0a273faea3c07470e19fd23673fda89543f9b 100644 (file)
@@ -232,7 +232,6 @@ struct svc_rqst {
        struct svc_pool *       rq_pool;        /* thread pool */
        struct svc_procedure *  rq_procinfo;    /* procedure info */
        struct auth_ops *       rq_authop;      /* authentication flavour */
-       u32                     rq_flavor;      /* pseudoflavor */
        struct svc_cred         rq_cred;        /* auth info */
        void *                  rq_xprt_ctxt;   /* transport specific context ptr */
        struct svc_deferred_req*rq_deferred;    /* deferred request we are replaying */
@@ -416,6 +415,7 @@ struct svc_procedure {
  */
 int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
+int svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
                            void (*shutdown)(struct svc_serv *, struct net *net));
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
index 548790e9113b317dbc8de0c46a691df3c0030269..dd74084a9799891309f54db25b8259ae3388f3c8 100644 (file)
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/cache.h>
 #include <linux/hash.h>
+#include <linux/cred.h>
 
-#define SVC_CRED_NGROUPS       32
 struct svc_cred {
        uid_t                   cr_uid;
        gid_t                   cr_gid;
        struct group_info       *cr_group_info;
+       u32                     cr_flavor; /* pseudoflavor */
+       char                    *cr_principal; /* for gss */
 };
 
+static inline void free_svc_cred(struct svc_cred *cred)
+{
+       if (cred->cr_group_info)
+               put_group_info(cred->cr_group_info);
+       kfree(cred->cr_principal);
+}
+
 struct svc_rqst;               /* forward decl */
 struct in6_addr;
 
@@ -131,7 +140,7 @@ extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *ne
 extern struct auth_domain *auth_domain_find(char *name);
 extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
 extern int auth_unix_forget_old(struct auth_domain *dom);
-extern void svcauth_unix_purge(void);
+extern void svcauth_unix_purge(struct net *net);
 extern void svcauth_unix_info_release(struct svc_xprt *xpt);
 extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
 
index 7c32daa025eb07b644d8185a27c8ea10d8b7c55f..726aff1a52011fcdfd3ab1e11b8a82ff1dbea703 100644 (file)
@@ -22,7 +22,6 @@ int gss_svc_init_net(struct net *net);
 void gss_svc_shutdown_net(struct net *net);
 int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
 u32 svcauth_gss_flavor(struct auth_domain *dom);
-char *svc_gss_principal(struct svc_rqst *);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
index b6661933e252643956cf3e8d389267f535d49653..c84ec68eaec957b2e16a059b6feccde505f7f241 100644 (file)
@@ -197,6 +197,10 @@ struct swap_info_struct {
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
+#ifdef CONFIG_FRONTSWAP
+       unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
+       atomic_t frontswap_pages;       /* frontswap pages in-use counter */
+#endif
 };
 
 struct swap_list_t {
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644 (file)
index 0000000..e282624
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _LINUX_SWAPFILE_H
+#define _LINUX_SWAPFILE_H
+
+/*
+ * these were static in swapfile.c but frontswap.c needs them and we don't
+ * want to expose them to the dozens of source files that include swap.h
+ */
+extern spinlock_t swap_lock;
+extern struct swap_list_t swap_list;
+extern struct swap_info_struct *swap_info[];
+extern int try_to_unuse(unsigned int, bool, unsigned long);
+
+#endif /* _LINUX_SWAPFILE_H */
index 792d16d9cbc74ff903c9d89e21726e1b5e3f54f7..47ead515c81197fc897bc7ef37fead3d9ab4b39f 100644 (file)
@@ -9,13 +9,15 @@
  * get good packing density in that tree, so the index should be dense in
  * the low-order bits.
  *
- * We arrange the `type' and `offset' fields so that `type' is at the five
+ * We arrange the `type' and `offset' fields so that `type' is at the seven
  * high-order bits of the swp_entry_t and `offset' is right-aligned in the
- * remaining bits.
+ * remaining bits.  Although `type' itself needs only five bits, we allow for
+ * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
  *
  * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
  */
-#define SWP_TYPE_SHIFT(e)      (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT)
+#define SWP_TYPE_SHIFT(e)      ((sizeof(e.val) * 8) - \
+                       (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
 #define SWP_OFFSET_MASK(e)     ((1UL << SWP_TYPE_SHIFT(e)) - 1)
 
 /*
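As a concrete reading of the revised macros, assuming a 64-bit swp_entry_t with MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2:

/* worked example, not part of the patch:
 *   SWP_TYPE_SHIFT(e)  = 64 - (5 + 2) = 57
 *   SWP_OFFSET_MASK(e) = (1UL << 57) - 1
 * `type' therefore sits in the top seven bits even though it only needs
 * five; the two spare bits are what swp_to_radix_entry() uses when
 * shmem/tmpfs shifts the whole entry up for radix-tree exceptional
 * entries. */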
index 3de3acb84a952ead111b90391756873efc15ebcb..19439c75c5b255751e2467b5405861763f131fd5 100644 (file)
@@ -858,4 +858,6 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
                                      unsigned long riovcnt,
                                      unsigned long flags);
 
+asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
+                        unsigned long idx1, unsigned long idx2);
 #endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
new file mode 100644 (file)
index 0000000..294d5d5
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _LINUX_TASK_WORK_H
+#define _LINUX_TASK_WORK_H
+
+#include <linux/list.h>
+#include <linux/sched.h>
+
+struct task_work;
+typedef void (*task_work_func_t)(struct task_work *);
+
+struct task_work {
+       struct hlist_node hlist;
+       task_work_func_t func;
+       void *data;
+};
+
+static inline void
+init_task_work(struct task_work *twork, task_work_func_t func, void *data)
+{
+       twork->func = func;
+       twork->data = data;
+}
+
+int task_work_add(struct task_struct *task, struct task_work *twork, bool);
+struct task_work *task_work_cancel(struct task_struct *, task_work_func_t);
+void task_work_run(void);
+
+static inline void exit_task_work(struct task_struct *task)
+{
+       if (unlikely(!hlist_empty(&task->task_works)))
+               task_work_run();
+}
+
+#endif /* _LINUX_TASK_WORK_H */
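task_work.h above is a new, self-contained interface for running a callback in the context of a given task, typically just before it returns to user space (see the tracehook_notify_resume() hunk later in this diff). A hedged usage sketch; the function names are invented and a real user would also need <linux/slab.h>:

static void my_deferred_func(struct task_work *twork)
{
	/* runs in the target task's context */
	kfree(twork->data);
	kfree(twork);
}

static int my_defer_to_task(struct task_struct *task, void *data)
{
	struct task_work *twork = kmalloc(sizeof(*twork), GFP_KERNEL);

	if (!twork)
		return -ENOMEM;
	init_task_work(twork, my_deferred_func, data);
	return task_work_add(task, twork, true);	/* true: notify/kick the task */
}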
index 4c5b63283377449ff94252f1ff64e2a8b16fddcf..5f359dbfcdce5bbf40b9fe5cada0d59d60981e5f 100644 (file)
@@ -69,16 +69,16 @@ union tcp_word_hdr {
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 
 
 enum { 
-       TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
-       TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
-       TCP_FLAG_URG = __cpu_to_be32(0x00200000),
-       TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
-       TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
-       TCP_FLAG_RST = __cpu_to_be32(0x00040000),
-       TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
-       TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
-       TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
-       TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
+       TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
+       TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
+       TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
+       TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
+       TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
+       TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
+       TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
+       TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
+       TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
+       TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
 }; 
 
 /*
index db78775eff3b209b534d157d4cec7d90eec8b175..ccc1899bd62e991e4649b72e7145010f93f948f9 100644 (file)
@@ -8,6 +8,7 @@
 #define _LINUX_THREAD_INFO_H
 
 #include <linux/types.h>
+#include <linux/bug.h>
 
 struct timespec;
 struct compat_timespec;
@@ -125,10 +126,26 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 static inline void set_restore_sigmask(void)
 {
        set_thread_flag(TIF_RESTORE_SIGMASK);
-       set_thread_flag(TIF_SIGPENDING);
+       WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+       clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+       return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
 
+#ifndef HAVE_SET_RESTORE_SIGMASK
+#error "no set_restore_sigmask() provided and default one won't work"
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
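The helpers above are the generic versions, available when TIF_RESTORE_SIGMASK is an ordinary thread flag. An architecture that keeps the bit elsewhere must define HAVE_SET_RESTORE_SIGMASK and provide all four itself, roughly along the lines of the ti->status scheme used by some ports (the TS_RESTORE_SIGMASK name below is an assumption borrowed from x86):

/* in the architecture's asm/thread_info.h, illustrative only */
#define HAVE_SET_RESTORE_SIGMASK	1

static inline void set_restore_sigmask(void)
{
	current_thread_info()->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}

static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();

	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}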
index 51bd91d911c3b3233e90301a4092a84750827776..6a4d82bedb03d4f6e9742069c40324c0441265f0 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
+#include <linux/task_work.h>
 struct linux_binprm;
 
 /*
@@ -153,7 +154,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
                ptrace_notify(SIGTRAP);
 }
 
-#ifdef TIF_NOTIFY_RESUME
 /**
  * set_notify_resume - cause tracehook_notify_resume() to be called
  * @task:              task that will call tracehook_notify_resume()
@@ -165,8 +165,10 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
  */
 static inline void set_notify_resume(struct task_struct *task)
 {
+#ifdef TIF_NOTIFY_RESUME
        if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
                kick_process(task);
+#endif
 }
 
 /**
@@ -184,7 +186,14 @@ static inline void set_notify_resume(struct task_struct *task)
  */
 static inline void tracehook_notify_resume(struct pt_regs *regs)
 {
+       /*
+        * The caller just cleared TIF_NOTIFY_RESUME. This barrier
+        * pairs with task_work_add()->set_notify_resume() after
+        * hlist_add_head(task->task_works);
+        */
+       smp_mb__after_clear_bit();
+       if (unlikely(!hlist_empty(&current->task_works)))
+               task_work_run();
 }
-#endif /* TIF_NOTIFY_RESUME */
 
 #endif /* <linux/tracehook.h> */
index 4990ef2b1fb75501a0af41d4199896279de136f1..9f47ab540f65e997b79b0a16c52332c564354234 100644 (file)
@@ -268,7 +268,6 @@ struct tty_struct {
        struct mutex ldisc_mutex;
        struct tty_ldisc *ldisc;
 
-       struct mutex legacy_mutex;
        struct mutex termios_mutex;
        spinlock_t ctrl_lock;
        /* Termios values are protected by the termios mutex */
@@ -606,12 +605,8 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
 
 /* tty_mutex.c */
 /* functions for preparation of BKL removal */
-extern void __lockfunc tty_lock(struct tty_struct *tty);
-extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
-                               struct tty_struct *tty2);
-extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-                               struct tty_struct *tty2);
+extern void __lockfunc tty_lock(void) __acquires(tty_lock);
+extern void __lockfunc tty_unlock(void) __releases(tty_lock);
 
 /*
  * this shall be called only from where BTM is held (like close)
@@ -626,9 +621,9 @@ extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
 static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
                long timeout)
 {
-       tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
+       tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
        tty_wait_until_sent(tty, timeout);
-       tty_lock(tty);
+       tty_lock();
 }
 
 /*
@@ -643,16 +638,16 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
  *
  * Do not use in new code.
  */
-#define wait_event_interruptible_tty(tty, wq, condition)               \
+#define wait_event_interruptible_tty(wq, condition)                    \
 ({                                                                     \
        int __ret = 0;                                                  \
        if (!(condition)) {                                             \
-               __wait_event_interruptible_tty(tty, wq, condition, __ret);      \
+               __wait_event_interruptible_tty(wq, condition, __ret);   \
        }                                                               \
        __ret;                                                          \
 })
 
-#define __wait_event_interruptible_tty(tty, wq, condition, ret)                \
+#define __wait_event_interruptible_tty(wq, condition, ret)             \
 do {                                                                   \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
@@ -661,9 +656,9 @@ do {                                                                        \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
-                       tty_unlock(tty);                                        \
+                       tty_unlock();                                   \
                        schedule();                                     \
-                       tty_lock(tty);                                  \
+                       tty_lock();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
index 7f480db60231a714b9e520f3a16856c5d4e4a5e1..9c1bd539ea70e780e0e926b54bfc9320d3ec34a4 100644 (file)
@@ -25,7 +25,7 @@ typedef __kernel_dev_t                dev_t;
 typedef __kernel_ino_t         ino_t;
 typedef __kernel_mode_t                mode_t;
 typedef unsigned short         umode_t;
-typedef __kernel_nlink_t       nlink_t;
+typedef __u32                  nlink_t;
 typedef __kernel_off_t         off_t;
 typedef __kernel_pid_t         pid_t;
 typedef __kernel_daddr_t       daddr_t;
index 7f855d50cdf5567b66b31f2bfb073ac025357c00..49b3ac29726adf3040d47384b51948401ac44887 100644 (file)
@@ -126,8 +126,6 @@ struct usb_hcd {
        unsigned                wireless:1;     /* Wireless USB HCD */
        unsigned                authorized_default:1;
        unsigned                has_tt:1;       /* Integrated TT in root hub */
-       unsigned                broken_pci_sleep:1;     /* Don't put the
-                       controller in PCI-D3 for system sleep */
 
        unsigned int            irq;            /* irq allocated */
        void __iomem            *regs;          /* device memory/io */
index b455c7c212eb6de26716a44ec3674eb2ebcfc270..ddb419cf4530339f37d7e239d134cc4222d3d900 100644 (file)
@@ -7,11 +7,19 @@
  * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
  */
 
+#ifndef _LINUX_VGA_SWITCHEROO_H_
+#define _LINUX_VGA_SWITCHEROO_H_
+
 #include <linux/fb.h>
 
+struct pci_dev;
+
 enum vga_switcheroo_state {
        VGA_SWITCHEROO_OFF,
        VGA_SWITCHEROO_ON,
+       /* below are referred only from vga_switcheroo_get_client_state() */
+       VGA_SWITCHEROO_INIT,
+       VGA_SWITCHEROO_NOT_FOUND,
 };
 
 enum vga_switcheroo_client_id {
@@ -50,6 +58,8 @@ void vga_switcheroo_unregister_handler(void);
 
 int vga_switcheroo_process_delayed_switch(void);
 
+int vga_switcheroo_get_client_state(struct pci_dev *dev);
+
 #else
 
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -62,5 +72,8 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
        int id, bool active) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+
 
 #endif
+#endif /* _LINUX_VGA_SWITCHEROO_H_ */
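vga_switcheroo_get_client_state() lets a GPU driver ask at probe time whether the mux currently has it powered; the dummy inline above returns VGA_SWITCHEROO_ON so the same code is a no-op when switcheroo support is compiled out. A sketch of the intended check (driver function name invented):

static int my_gpu_check_switcheroo(struct pci_dev *pdev)
{
	switch (vga_switcheroo_get_client_state(pdev)) {
	case VGA_SWITCHEROO_OFF:
		return -ENODEV;			/* stay dormant until switched on */
	case VGA_SWITCHEROO_NOT_FOUND:		/* not managed by switcheroo */
	case VGA_SWITCHEROO_INIT:
	case VGA_SWITCHEROO_ON:
	default:
		return 0;			/* safe to bring the device up */
	}
}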
index 370d11106c1116811b8bf668892bf4fadf6a8b0d..2039c5d3292e801c0abb8c770c99cdfe287ce9bf 100644 (file)
@@ -2640,9 +2640,9 @@ struct v4l2_create_buffers {
 
 /* Experimental, these three ioctls may change over the next couple of kernel
    versions. */
-#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 96, struct v4l2_enum_dv_timings)
-#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 97, struct v4l2_dv_timings)
-#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 98, struct v4l2_dv_timings_cap)
+#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
 
 /* Reminder: when adding new ioctls please add support for them to
    drivers/media/video/v4l2-compat-ioctl32.c as well! */
index ac40716b44e9a2a9ee1c13d1e878a195ce7215b0..da70f0facd2b77215e79860e5af8104da30b03a2 100644 (file)
@@ -45,6 +45,8 @@ struct watchdog_info {
 #define        WDIOF_SETTIMEOUT        0x0080  /* Set timeout (in seconds) */
 #define        WDIOF_MAGICCLOSE        0x0100  /* Supports magic close char */
 #define        WDIOF_PRETIMEOUT        0x0200  /* Pretimeout (in seconds), get/set */
+#define        WDIOF_ALARMONLY         0x0400  /* Watchdog triggers a management or
+                                          other external alarm not a reboot */
 #define        WDIOF_KEEPALIVEPING     0x8000  /* Keep alive ping reply */
 
 #define        WDIOS_DISABLECARD       0x0001  /* Turn off the watchdog timer */
@@ -54,6 +56,8 @@ struct watchdog_info {
 #ifdef __KERNEL__
 
 #include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
 
 struct watchdog_ops;
 struct watchdog_device;
@@ -67,6 +71,8 @@ struct watchdog_device;
  * @status:    The routine that shows the status of the watchdog device.
  * @set_timeout:The routine for setting the watchdog devices timeout value.
  * @get_timeleft:The routine that get's the time that's left before a reset.
+ * @ref:       The ref operation for dyn. allocated watchdog_device structs
+ * @unref:     The unref operation for dyn. allocated watchdog_device structs
  * @ioctl:     The routines that handles extra ioctl calls.
  *
  * The watchdog_ops structure contains a list of low-level operations
@@ -84,11 +90,17 @@ struct watchdog_ops {
        unsigned int (*status)(struct watchdog_device *);
        int (*set_timeout)(struct watchdog_device *, unsigned int);
        unsigned int (*get_timeleft)(struct watchdog_device *);
+       void (*ref)(struct watchdog_device *);
+       void (*unref)(struct watchdog_device *);
        long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
 /** struct watchdog_device - The structure that defines a watchdog device
  *
+ * @id:                The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev:      The watchdog's Character device.
+ * @dev:       The device for our watchdog
+ * @parent:    The parent bus device
  * @info:      Pointer to a watchdog_info structure.
  * @ops:       Pointer to the list of watchdog operations.
  * @bootstatus:        Status of the watchdog device at boot.
@@ -96,6 +108,7 @@ struct watchdog_ops {
  * @min_timeout:The watchdog devices minimum timeout value.
  * @max_timeout:The watchdog devices maximum timeout value.
  * @driver-data:Pointer to the drivers private data.
+ * @lock:      Lock for watchdog core internal use only.
  * @status:    Field that contains the devices internal status bits.
  *
  * The watchdog_device structure contains all information about a
@@ -103,8 +116,15 @@ struct watchdog_ops {
  *
  * The driver-data field may not be accessed directly. It must be accessed
  * via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
  */
 struct watchdog_device {
+       int id;
+       struct cdev cdev;
+       struct device *dev;
+       struct device *parent;
        const struct watchdog_info *info;
        const struct watchdog_ops *ops;
        unsigned int bootstatus;
@@ -112,12 +132,14 @@ struct watchdog_device {
        unsigned int min_timeout;
        unsigned int max_timeout;
        void *driver_data;
+       struct mutex lock;
        unsigned long status;
 /* Bit numbers for status flags */
 #define WDOG_ACTIVE            0       /* Is the watchdog running/active */
 #define WDOG_DEV_OPEN          1       /* Opened via /dev/watchdog ? */
 #define WDOG_ALLOW_RELEASE     2       /* Did we receive the magic char ? */
 #define WDOG_NO_WAY_OUT                3       /* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED      4       /* Has the device been unregistered */
 };
 
 #ifdef CONFIG_WATCHDOG_NOWAYOUT
@@ -128,6 +150,12 @@ struct watchdog_device {
 #define WATCHDOG_NOWAYOUT_INIT_STATUS  0
 #endif
 
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+       return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
 /* Use the following function to set the nowayout feature */
 static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
 {
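watchdog_active() above replaces open-coded test_bit(WDOG_ACTIVE, ...) checks in drivers, while the new id/cdev/dev/lock fields move the character-device handling into the watchdog core. A small illustrative fragment, assuming the probe routine stored the watchdog_device with dev_set_drvdata() and with my_wdt_hw_stop()/my_wdt_hw_start() as hypothetical hardware helpers:

static int my_wdt_suspend(struct device *dev)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	/* only quiesce the hardware if userspace actually started it */
	if (watchdog_active(wdd))
		my_wdt_hw_stop(wdd);
	return 0;
}

static int my_wdt_resume(struct device *dev)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		my_wdt_hw_start(wdd);
	return 0;
}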
index 66a7b579e31c81912f635beb272cebf82a05ac57..3def64ba77fa0100bd62f0072cc6989e0f403f3f 100644 (file)
@@ -1144,6 +1144,12 @@ struct extended_inquiry_info {
        __u8     data[240];
 } __packed;
 
+#define HCI_EV_KEY_REFRESH_COMPLETE    0x30
+struct hci_ev_key_refresh_complete {
+       __u8    status;
+       __le16  handle;
+} __packed;
+
 #define HCI_EV_IO_CAPA_REQUEST         0x31
 struct hci_ev_io_capa_request {
        bdaddr_t bdaddr;
index 9808877c2ab91a609a79494fbd4945dd8def5bd3..a7a683e30b64e6beb2bc87907c85576d85385007 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>
 
 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN          0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
                                    unsigned char **option)
 {
-       return -ENOSYS;
+       unsigned char *opt = *option;
+       unsigned char err_offset = 0;
+       u8 opt_len = opt[1];
+       u8 opt_iter;
+
+       if (opt_len < 8) {
+               err_offset = 1;
+               goto out;
+       }
+
+       if (get_unaligned_be32(&opt[2]) == 0) {
+               err_offset = 2;
+               goto out;
+       }
+
+       for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+                       err_offset = opt_iter + 1;
+                       goto out;
+               }
+               opt_iter += opt[opt_iter + 1];
+       }
+
+out:
+       *option = opt + err_offset;
+       return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */
 
index bed833d9796aed86bac5ca7d45ad53cde3447e52..8197eadca819633eb97f3919286a7e6b19bbcddd 100644 (file)
@@ -60,6 +60,7 @@ struct dst_entry {
 #define DST_NOCOUNT            0x0020
 #define DST_NOPEER             0x0040
 #define DST_FAKE_RTABLE                0x0080
+#define DST_XFRM_TUNNEL                0x0100
 
        short                   error;
        short                   obsolete;
index b94765e38e8074aa5a877c90b395f0bc8cb6bb4a..2040bff945d4562e0c0129d078a26a0a1be34672 100644 (file)
@@ -40,7 +40,10 @@ struct inet_peer {
        u32                     pmtu_orig;
        u32                     pmtu_learned;
        struct inetpeer_addr_base redirect_learned;
-       struct list_head        gc_list;
+       union {
+               struct list_head        gc_list;
+               struct rcu_head     gc_rcu;
+       };
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following fields
         * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
index d6146b4811c212601a365eee7c9c28789b76481a..95374d1696a163a75f8aa006bf99fe958df195aa 100644 (file)
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
-               nf_reset(skb);
+               nf_conntrack_put(skb->nfct);
                skb->nfct = &nf_ct_untracked_get()->ct_general;
                skb->nfctinfo = IP_CT_NEW;
                nf_conntrack_get(skb->nfct);
index 1937c7d98304fc0ecfd6d7315c9e3cbb9e93a557..95e39b6a02ec924ab1229e728ddb4bd45ff3d682 100644 (file)
@@ -1940,6 +1940,11 @@ enum ieee80211_rate_control_changed {
  *     to also unregister the device. If it returns 1, then mac80211
  *     will also go through the regular complete restart on resume.
  *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ *     modified. The reason is that device_set_wakeup_enable() is
+ *     supposed to be called when the configuration changes, not only
+ *     in suspend().
+ *
  * @add_interface: Called when a netdevice attached to the hardware is
  *     enabled. Because it is not called for monitor mode devices, @start
  *     and @stop must be implemented.
@@ -2966,6 +2971,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
  * ieee80211_generic_frame_duration - Calculate the duration field for a frame
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
  * @frame_len: the length of the frame.
  * @rate: the rate at which the frame is going to be transmitted.
  *
index a88fb6939387f228ac5826949f68151e0fceaf16..e1ce1048fe5fa1142196cb88e08829b7bb1d60f5 100644 (file)
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (net->ct.nf_conntrack_event_cb == NULL)
+       if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
                return;
 
        e = nf_ct_ecache_find(ct);
index 928daf595bebc04b840ddca77d649688f05eb2d1..bcd525e39a0ba89549f83c381b936d24db59bcf2 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index ed2b78e2375d0de3ad537f4b901630d0ffa7b69d..98705468ac0329c884bfd0e2663422b73de565b8 100644 (file)
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
 {
        struct flowi4 fl4 = {
                .flowi4_oif = oif,
+               .flowi4_tos = tos,
                .daddr = daddr,
                .saddr = saddr,
-               .flowi4_tos = tos,
        };
        return ip_route_output_key(net, &fl4);
 }
index 55ce96b53b092e3ca04db6eaa7d6888fb9f478bc..9d7d54a00e63f28feb80942bcaf7afb7ee68cd5c 100644 (file)
@@ -220,13 +220,16 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       unsigned char           data[24];
+       u16                     bond_queue_mapping;
+       u16                     _pad;
+       unsigned char           data[20];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+
+       BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
index e4652fe5895863d956a52e436d10f1fc3724415b..fecdf31816f2fc6a834a060cfdd39158dcb30bc2 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc14ccfde5bd85c3b08f990487af66ab072..2c5d2b4d5d1eb51542df26094700250ab6191070 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
new file mode 100644 (file)
index 0000000..604cb9b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011-2012 Intel Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef FCOE_SYSFS
+#define FCOE_SYSFS
+
+#include <linux/if_ether.h>
+#include <linux/device.h>
+#include <scsi/fc/fc_fcoe.h>
+
+struct fcoe_ctlr_device;
+struct fcoe_fcf_device;
+
+struct fcoe_sysfs_function_template {
+       void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
+       void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
+};
+
+#define dev_to_ctlr(d)                                 \
+       container_of((d), struct fcoe_ctlr_device, dev)
+
+enum fip_conn_type {
+       FIP_CONN_TYPE_UNKNOWN,
+       FIP_CONN_TYPE_FABRIC,
+       FIP_CONN_TYPE_VN2VN,
+};
+
+struct fcoe_ctlr_device {
+       u32                             id;
+
+       struct device                   dev;
+       struct fcoe_sysfs_function_template *f;
+
+       struct list_head                fcfs;
+       char                            work_q_name[20];
+       struct workqueue_struct         *work_q;
+       char                            devloss_work_q_name[20];
+       struct workqueue_struct         *devloss_work_q;
+       struct mutex                    lock;
+
+       int                             fcf_dev_loss_tmo;
+       enum fip_conn_type              mode;
+
+       /* expected in host order for displaying */
+       struct fcoe_fc_els_lesb         lesb;
+};
+
+static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
+{
+       return (void *)(ctlr + 1);
+}
+
+/* fcf states */
+enum fcf_state {
+       FCOE_FCF_STATE_UNKNOWN,
+       FCOE_FCF_STATE_DISCONNECTED,
+       FCOE_FCF_STATE_CONNECTED,
+       FCOE_FCF_STATE_DELETED,
+};
+
+struct fcoe_fcf_device {
+       u32                 id;
+       struct device       dev;
+       struct list_head    peers;
+       struct work_struct  delete_work;
+       struct delayed_work dev_loss_work;
+       u32                 dev_loss_tmo;
+       void                *priv;
+       enum fcf_state      state;
+
+       u64                 fabric_name;
+       u64                 switch_name;
+       u32                 fc_map;
+       u16                 vfid;
+       u8                  mac[ETH_ALEN];
+       u8                  priority;
+       u32                 fka_period;
+       u8                  selected;
+       u16                 vlan_id;
+};
+
+#define dev_to_fcf(d)                                  \
+       container_of((d), struct fcoe_fcf_device, dev)
+/* parentage should never be missing */
+#define fcoe_fcf_dev_to_ctlr_dev(x)            \
+       dev_to_ctlr((x)->dev.parent)
+#define fcoe_fcf_device_priv(x)                        \
+       ((x)->priv)
+
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+                           struct fcoe_sysfs_function_template *f,
+                           int priv_size);
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
+                                           struct fcoe_fcf_device *);
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
+
+int __init fcoe_sysfs_setup(void);
+void __exit fcoe_sysfs_teardown(void);
+
+#endif /* FCOE_SYSFS */
index cfdb55f0937e37002d21be6eaf9f81833683aa4b..22b07cc99808562c86a3de2728ec0557291e56dc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/random.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
+#include <scsi/fcoe_sysfs.h>
 
 #define FCOE_MAX_CMD_LEN       16      /* Supported CDB length */
 
@@ -158,9 +159,25 @@ struct fcoe_ctlr {
        spinlock_t ctlr_lock;
 };
 
+/**
+ * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
+ * @ctlr: The fcoe_ctlr whose private data will be returned
+ */
+static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
+{
+       return (void *)(ctlr + 1);
+}
+
+#define fcoe_ctlr_to_ctlr_dev(x)                                       \
+       (struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+
 /**
  * struct fcoe_fcf - Fibre-Channel Forwarder
  * @list:       list linkage
+ * @event_work:  Work for FC Transport actions queue
+ * @event:       The event to be processed
+ * @fip:         The controller that the FCF was discovered on
+ * @fcf_dev:     The associated fcoe_fcf_device instance
  * @time:       system time (jiffies) when an advertisement was last received
  * @switch_name: WWN of switch from advertisement
  * @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@ struct fcoe_ctlr {
  */
 struct fcoe_fcf {
        struct list_head list;
+       struct work_struct event_work;
+       struct fcoe_ctlr *fip;
+       struct fcoe_fcf_device *fcf_dev;
        unsigned long time;
 
        u64 switch_name;
@@ -198,6 +218,9 @@ struct fcoe_fcf {
        u8 fd_flags:1;
 };
 
+#define fcoe_fcf_to_fcf_dev(x)                 \
+       ((x)->fcf_dev)
+
 /**
  * struct fcoe_rport - VN2VN remote port
  * @time:      time of create or last beacon packet received from node
@@ -333,6 +356,10 @@ void fcoe_queue_timer(ulong lport);
 int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
                           struct fcoe_percpu_s *fps);
 
+/* FCoE Sysfs helpers */
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+
 /**
  * struct netdev_list
  * A mapping from netdevice to fcoe_transport
index f4f1c96dca726ff2f00211994dcf7381ef55b5b0..10ce74f589c5fd57622f630a17fbbc7e736d24e4 100644 (file)
@@ -163,6 +163,8 @@ enum ata_command_set {
         ATAPI_COMMAND_SET = 1,
 };
 
+#define ATA_RESP_FIS_SIZE 24
+
 struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
@@ -171,7 +173,7 @@ struct sata_device {
 
        struct ata_port *ap;
        struct ata_host ata_host;
-       struct ata_taskfile tf;
+       u8     fis[ATA_RESP_FIS_SIZE];
 };
 
 enum {
@@ -537,7 +539,7 @@ enum exec_status {
  */
 struct ata_task_resp {
        u16  frame_len;
-       u8   ending_fis[24];      /* dev to host or data-in */
+       u8   ending_fis[ATA_RESP_FIS_SIZE];       /* dev to host or data-in */
 };
 
 #define SAS_STATUS_BUF_SIZE 96
index 1e1198546c725d43a7ff6baa657c25ef364d73ee..ac06cc595890ef87a8e01632f5872ab11e4057f2 100644 (file)
@@ -134,10 +134,16 @@ struct scsi_cmnd {
 
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
+       struct scsi_driver **sdp;
+
        if (!cmd->request->rq_disk)
                return NULL;
 
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+       if (!sdp)
+               return NULL;
+
+       return *sdp;
 }
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
index 6efb2e1416e073924a94442762459d89e118f520..ba969885232101e9eeed555e11c2385887b9cb85 100644 (file)
@@ -151,6 +151,7 @@ struct scsi_device {
                                           SD_LAST_BUGGY_SECTORS */
        unsigned no_read_disc_info:1;   /* Avoid READ_DISC_INFO cmds */
        unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
+       unsigned try_rc_10_first:1;     /* Try READ_CAPACITY_10 first */
        unsigned is_visible:1;  /* is the device visible in sysfs */
 
        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
index ec3f910aa40b139bdf61eec45c55376225f8647b..0c3c2fb0f9395be596699830a62939cc83dd1609 100644 (file)
@@ -44,6 +44,7 @@ struct snd_tea575x_ops {
 
 struct snd_tea575x {
        struct v4l2_device *v4l2_dev;
+       struct v4l2_file_operations fops;
        struct video_device vd;         /* video device */
        int radio_nr;                   /* radio_nr */
        bool tea5759;                   /* 5759 chip is present */
@@ -62,7 +63,7 @@ struct snd_tea575x {
        int (*ext_init)(struct snd_tea575x *tea);
 };
 
-int snd_tea575x_init(struct snd_tea575x *tea);
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
 void snd_tea575x_exit(struct snd_tea575x *tea);
 
 #endif /* __SOUND_TEA575X_TUNER_H */
index 116959933f46442b27075ba7d40c79c746308049..c78a23333c4fb801c72e86a55b05fd7e7bf66f67 100644 (file)
@@ -47,6 +47,7 @@ struct target_core_fabric_ops {
         */
        int (*check_stop_free)(struct se_cmd *);
        void (*release_cmd)(struct se_cmd *);
+       void (*put_session)(struct se_session *);
        /*
         * Called with spin_lock_bh(struct se_portal_group->session_lock held.
         */
index 1480900c511ce134443e7bf2f41d8e5ba0f97117..d274734b2aa42fee56d7ce7ab2b7898d39521e7e 100644 (file)
@@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick,
  *     "In holdoff": Nothing to do, holding off after unsuccessful attempt.
  *     "Begin holdoff": Attempt failed, don't retry until next jiffy.
  *     "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *     "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
  *     "More callbacks": Still more callbacks, try again to clear them out.
  *     "Callbacks drained": All callbacks processed, off to dyntick idle!
  *     "Timer": Timer fired to cause CPU to continue processing callbacks.
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
new file mode 100644 (file)
index 0000000..609efe8
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
+#define _LINUX_VIDEO_AUO_K190XFB_H_
+
+/* Controller standby command needs a param */
+#define AUOK190X_QUIRK_STANDBYPARAM    (1 << 0)
+
+/* Controller standby is completely broken */
+#define AUOK190X_QUIRK_STANDBYBROKEN   (1 << 1)
+
+/*
+ * Resolutions for the displays
+ */
+#define AUOK190X_RESOLUTION_800_600            0
+#define AUOK190X_RESOLUTION_1024_768           1
+
+/*
+ * struct used by auok190x. board specific stuff comes from *board
+ */
+struct auok190xfb_par {
+       struct fb_info *info;
+       struct auok190x_board *board;
+
+       struct regulator *regulator;
+
+       struct mutex io_lock;
+       struct delayed_work work;
+       wait_queue_head_t waitq;
+       int resolution;
+       int rotation;
+       int consecutive_threshold;
+       int update_cnt;
+
+       /* panel and controller information */
+       int epd_type;
+       int panel_size_int;
+       int panel_size_float;
+       int panel_model;
+       int tcon_version;
+       int lut_version;
+
+       /* individual controller callbacks */
+       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+       void (*update_all)(struct auok190xfb_par *par);
+       bool (*need_refresh)(struct auok190xfb_par *par);
+       void (*init)(struct auok190xfb_par *par);
+       void (*recover)(struct auok190xfb_par *par);
+
+       int update_mode; /* mode to use for updates */
+       int last_mode; /* update mode last used */
+       int flash;
+
+       /* power management */
+       int autosuspend_delay;
+       bool standby;
+       bool manual_standby;
+};
+
+/**
+ * Board specific platform-data
+ * @init:              initialize the controller interface
+ * @cleanup:           cleanup the controller interface
+ * @wait_for_rdy:      wait until the controller is not busy anymore
+ * @set_ctl:           change an interface control
+ * @set_hdb:           write a value to the data register
+ * @get_hdb:           read a value from the data register
+ * @setup_irq:         method to setup the irq handling on the busy gpio
+ * @gpio_nsleep:       sleep gpio
+ * @gpio_nrst:         reset gpio
+ * @gpio_nbusy:                busy gpio
+ * @resolution:                one of the AUOK190X_RESOLUTION constants
+ * @rotation:          rotation of the framebuffer
+ * @quirks:            controller quirks to honor
+ * @fps:               frames per second for defio
+ */
+struct auok190x_board {
+       int (*init)(struct auok190xfb_par *);
+       void (*cleanup)(struct auok190xfb_par *);
+       int (*wait_for_rdy)(struct auok190xfb_par *);
+
+       void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
+       void (*set_hdb)(struct auok190xfb_par *, u16);
+       u16 (*get_hdb)(struct auok190xfb_par *);
+
+       int (*setup_irq)(struct fb_info *);
+
+       int gpio_nsleep;
+       int gpio_nrst;
+       int gpio_nbusy;
+
+       int resolution;
+       int rotation;
+       int quirks;
+       int fps;
+};
+
+#endif
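The callbacks and gpio fields documented above come from board code. A hypothetical platform-data fragment showing the expected shape; every name, gpio number and quirk choice below is invented, and the my_epd_* accessors would be defined elsewhere in the board file:

static struct auok190x_board my_epd_board = {
	.init		= my_epd_init,
	.cleanup	= my_epd_cleanup,
	.wait_for_rdy	= my_epd_wait_for_rdy,
	.set_ctl	= my_epd_set_ctl,
	.set_hdb	= my_epd_set_hdb,
	.get_hdb	= my_epd_get_hdb,
	.setup_irq	= my_epd_setup_irq,
	.gpio_nsleep	= 100,
	.gpio_nrst	= 101,
	.gpio_nbusy	= 102,
	.resolution	= AUOK190X_RESOLUTION_800_600,
	.rotation	= 0,
	.quirks		= AUOK190X_QUIRK_STANDBYPARAM,
	.fps		= 4,
};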
index 8847a9d6dd42db8d878a1235eef4e92019bad883..bd8cabd344db7242dc61bb5643dee70c6d349e09 100644 (file)
@@ -14,7 +14,7 @@
 
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
-#define MAX_EQ_LOOP 4
+#define MAX_EQ_LOOP 5
 
 enum link_rate_type {
        LINK_RATE_1_62GBPS = 0x06,
index 772c770535f1ee02075b632a4bb1ca91bda078dc..83ce5e667d471077cdeda3ccfd9c4945bd1f5b04 100644 (file)
@@ -315,6 +315,7 @@ struct mipi_dsim_lcd_device {
        int                     id;
        int                     bus_id;
        int                     irq;
+       int                     panel_reverse;
 
        struct mipi_dsim_device *master;
        void                    *platform_data;
index 1c46a14341dd9892816a4398a274a7aa43cb5db1..c8e59b4a3364264df1719090162f19cf0b250dd8 100644 (file)
@@ -51,6 +51,8 @@
 
 struct omap_dss_device;
 struct omap_overlay_manager;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
 
 enum omap_display_type {
        OMAP_DISPLAY_TYPE_NONE          = 0,
@@ -158,6 +160,13 @@ enum omap_dss_display_state {
        OMAP_DSS_DISPLAY_SUSPENDED,
 };
 
+enum omap_dss_audio_state {
+       OMAP_DSS_AUDIO_DISABLED = 0,
+       OMAP_DSS_AUDIO_ENABLED,
+       OMAP_DSS_AUDIO_CONFIGURED,
+       OMAP_DSS_AUDIO_PLAYING,
+};
+
 /* XXX perhaps this should be removed */
 enum omap_dss_overlay_managers {
        OMAP_DSS_OVL_MGR_LCD,
@@ -166,8 +175,9 @@ enum omap_dss_overlay_managers {
 };
 
 enum omap_dss_rotation_type {
-       OMAP_DSS_ROT_DMA = 0,
-       OMAP_DSS_ROT_VRFB = 1,
+       OMAP_DSS_ROT_DMA        = 1 << 0,
+       OMAP_DSS_ROT_VRFB       = 1 << 1,
+       OMAP_DSS_ROT_TILER      = 1 << 2,
 };
 
 /* clockwise rotation angle */
@@ -309,6 +319,7 @@ struct omap_dss_board_info {
        struct omap_dss_device *default_device;
        int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
        void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
+       int (*set_min_bus_tput)(struct device *dev, unsigned long r);
 };
 
 /* Init with the board info */
@@ -316,11 +327,6 @@ extern int omap_display_init(struct omap_dss_board_info *board_data);
 /* HDMI mux init*/
 extern int omap_hdmi_init(enum omap_hdmi_flags flags);
 
-struct omap_display_platform_data {
-       struct omap_dss_board_info *board_data;
-       /* TODO: Additional members to be added when PM is considered */
-};
-
 struct omap_video_timings {
        /* Unit: pixels */
        u16 x_res;
@@ -587,6 +593,8 @@ struct omap_dss_device {
 
        enum omap_dss_display_state state;
 
+       enum omap_dss_audio_state audio_state;
+
        /* platform specific  */
        int (*platform_enable)(struct omap_dss_device *dssdev);
        void (*platform_disable)(struct omap_dss_device *dssdev);
@@ -599,6 +607,11 @@ struct omap_dss_hdmi_data
        int hpd_gpio;
 };
 
+struct omap_dss_audio {
+       struct snd_aes_iec958 *iec;
+       struct snd_cea_861_aud_if *cea;
+};
+
 struct omap_dss_driver {
        struct device_driver driver;
 
@@ -646,6 +659,24 @@ struct omap_dss_driver {
 
        int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
        bool (*detect)(struct omap_dss_device *dssdev);
+
+       /*
+        * For display drivers that support audio. This encompasses
+        * HDMI and DisplayPort at the moment.
+        */
+       /*
+        * Note: These functions might sleep. Do not call while
+        * holding a spinlock/readlock.
+        */
+       int (*audio_enable)(struct omap_dss_device *dssdev);
+       void (*audio_disable)(struct omap_dss_device *dssdev);
+       bool (*audio_supported)(struct omap_dss_device *dssdev);
+       int (*audio_config)(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio);
+       /* Note: These functions may not sleep */
+       int (*audio_start)(struct omap_dss_device *dssdev);
+       void (*audio_stop)(struct omap_dss_device *dssdev);
+
 };
 
 int omap_dss_register_driver(struct omap_dss_driver *);
@@ -670,6 +701,8 @@ struct omap_overlay *omap_dss_get_overlay(int num);
 void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres);
 int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings);
 
 typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
 int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
index 728f9de9c258255d868709fc8ae2eac8987c682a..63d20efa254a045d53993e146e6e2f33adf50c16 100644 (file)
@@ -18,9 +18,11 @@ struct clk;
 /*
  * flags format
  *
- * 0x0000000A
+ * 0x00000CBA
  *
  * A: Audio source select
+ * B: Int output option
+ * C: Chip specific option
  */
 
 /* Audio source select */
@@ -30,6 +32,14 @@ struct clk;
 #define HDMI_SND_SRC_DSD       (2 << 0)
 #define HDMI_SND_SRC_HBR       (3 << 0)
 
+/* Int output option */
+#define HDMI_OUTPUT_PUSH_PULL  (1 << 4) /* System control : output mode */
+#define HDMI_OUTPUT_POLARITY_HI        (1 << 5) /* System control : output polarity */
+
+/* Chip specific option */
+#define HDMI_32BIT_REG         (1 << 8)
+#define HDMI_HAS_HTOP1         (1 << 9)
+
 struct sh_mobile_hdmi_info {
        unsigned int                     flags;
        long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq,
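For reference, a hedged board-file sketch of how the three flag groups compose into the single flags word laid out as 0x00000CBA above; the particular combination is chosen for illustration only:

	/* Illustrative sketch only; the combination is hypothetical. */
	static struct sh_mobile_hdmi_info my_hdmi_pdata = {
		.flags = HDMI_SND_SRC_DSD		/* nibble A: audio source    */
		       | HDMI_OUTPUT_PUSH_PULL		/* nibble B: INT output mode */
		       | HDMI_OUTPUT_POLARITY_HI	/* nibble B: INT polarity    */
		       | HDMI_32BIT_REG,		/* nibble C: chip specific   */
	};
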
index 1e004d0574689c225526290d2a4cc82fe95287b6..d07dcf9fc8a9a8f05a570298d106c29e28f7167f 100644 (file)
@@ -167,7 +167,7 @@ config KERNEL_BZIP2
        depends on HAVE_KERNEL_BZIP2
        help
          Its compression ratio and speed is intermediate.
-         Decompression speed is slowest among the three.  The kernel
+         Decompression speed is slowest among the choices.  The kernel
          size is about 10% smaller with bzip2, in comparison to gzip.
          Bzip2 uses a large amount of memory. For modern kernels you
          will need at least 8MB RAM or more for booting.
@@ -176,10 +176,9 @@ config KERNEL_LZMA
        bool "LZMA"
        depends on HAVE_KERNEL_LZMA
        help
-         The most recent compression algorithm.
-         Its ratio is best, decompression speed is between the other
-         two. Compression is slowest.  The kernel size is about 33%
-         smaller with LZMA in comparison to gzip.
+         This compression algorithm's ratio is best.  Decompression speed
+         is between gzip and bzip2.  Compression is slowest.
+         The kernel size is about 33% smaller with LZMA in comparison to gzip.
 
 config KERNEL_XZ
        bool "XZ"
@@ -200,7 +199,7 @@ config KERNEL_LZO
        bool "LZO"
        depends on HAVE_KERNEL_LZO
        help
-         Its compression ratio is the poorest among the 4. The kernel
+         Its compression ratio is the poorest among the choices. The kernel
          size is about 10% bigger than gzip; however its speed
          (both compression and decompression) is the fastest.
 
index 42b0707c348108b98f6ce05ae1a98ad4f64a0e86..d3f0aeed2d39fe06aa07cb4147f747af8b7597ee 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
@@ -330,7 +340,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
        if (err)
                return err;
 
-       sys_chdir((const char __user __force *)"/root");
+       sys_chdir("/root");
        s = current->fs->pwd.dentry->d_sb;
        ROOT_DEV = s->s_dev;
        printk(KERN_INFO
@@ -556,5 +566,5 @@ void __init prepare_namespace(void)
 out:
        devtmpfs_mount("dev");
        sys_mount(".", "/", NULL, MS_MOVE, NULL);
-       sys_chroot((const char __user __force *)".");
+       sys_chroot(".");
 }
index 9047330c73e9b8fed1098131513d6a8a1bdf0f8b..135959a276bef21628556119247be2e4d0d83610 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/unistd.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
index 32c4799b8c91bb483f418cd6a39e1e11eda0cd7a..8cb6db54285ba64f81af9ba2b388a7c216ceec61 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/delay.h>
 #include <linux/raid/md_u.h>
 #include <linux/raid/md_p.h>
@@ -283,7 +293,7 @@ static void __init autodetect_raid(void)
 
        wait_for_device_probe();
 
-       fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
+       fd = sys_open("/dev/md0", 0, 0);
        if (fd >= 0) {
                sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
                sys_close(fd);
index 6212586df29ace81e239b71d01b95826cb636507..6be2879cca66971859b1c51ff80451616ab13975 100644 (file)
@@ -1,3 +1,12 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
 
 #include <linux/kernel.h>
 #include <linux/fs.h>
@@ -181,7 +190,7 @@ int __init rd_load_image(char *from)
        char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
 
-       out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
+       out_fd = sys_open("/dev/ram", O_RDWR, 0);
        if (out_fd < 0)
                goto out;
 
@@ -280,7 +289,7 @@ noclose_input:
        sys_close(out_fd);
 out:
        kfree(buf);
-       sys_unlink((const char __user __force *) "/dev/ram");
+       sys_unlink("/dev/ram");
        return res;
 }
 
index 8216c303b0821b15f1a353a2fea7af84abb8f4bf..84c6bf111300878a095a9fb3f8f91678dbe10b65 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
@@ -74,7 +84,7 @@ static void __init free_hash(void)
        }
 }
 
-static long __init do_utime(char __user *filename, time_t mtime)
+static long __init do_utime(char *filename, time_t mtime)
 {
        struct timespec t[2];
 
@@ -529,7 +539,7 @@ static void __init clean_rootfs(void)
        struct linux_dirent64 *dirp;
        int num;
 
-       fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
+       fd = sys_open("/", O_RDONLY, 0);
        WARN_ON(fd < 0);
        if (fd < 0)
                return;
@@ -589,7 +599,7 @@ static int __init populate_rootfs(void)
                }
                printk(KERN_INFO "rootfs image is not initramfs (%s)"
                                "; looks like an initrd\n", err);
-               fd = sys_open((const char __user __force *) "/initrd.image",
+               fd = sys_open("/initrd.image",
                              O_WRONLY|O_CREAT, 0700);
                if (fd >= 0) {
                        sys_write(fd, (char *)initrd_start,
index 1ca6b32c482875cbfd90fb315831c4c110d28414..b5cc0a7c4708f167925aa1f974eb356f99de1ebc 100644 (file)
@@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void)
        parse_early_param();
        parse_args("Booting kernel", static_command_line, __start___param,
                   __stop___param - __start___param,
-                  0, 0, &unknown_bootoption);
+                  -1, -1, &unknown_bootoption);
 
        jump_label_init();
 
@@ -755,13 +755,8 @@ static void __init do_initcalls(void)
 {
        int level;
 
-       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
-               pr_info("initlevel:%d=%s, %d registered initcalls\n",
-                       level, initcall_level_names[level],
-                       (int) (initcall_levels[level+1]
-                               - initcall_levels[level]));
+       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
                do_initcall_level(level);
-       }
 }
 
 /*
index 0c09366b96f3a634365c945c1a2d987a5afbf55e..383d638340b8417c8e31f53b935f8e8d833b2707 100644 (file)
 #include <linux/ipc_namespace.h>
 #include <linux/sysctl.h>
 
-/*
- * Define the ranges various user-specified maximum values can
- * be set to.
- */
-#define MIN_MSGMAX     1               /* min value for msg_max */
-#define MAX_MSGMAX     HARD_MSGMAX     /* max value for msg_max */
-#define MIN_MSGSIZEMAX 128             /* min value for msgsize_max */
-#define MAX_MSGSIZEMAX (8192*128)      /* max value for msgsize_max */
-
 #ifdef CONFIG_PROC_SYSCTL
 static void *get_mq(ctl_table *table)
 {
@@ -31,16 +22,6 @@ static void *get_mq(ctl_table *table)
        return which;
 }
 
-static int proc_mq_dointvec(ctl_table *table, int write,
-       void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table mq_table;
-       memcpy(&mq_table, table, sizeof(mq_table));
-       mq_table.data = get_mq(table);
-
-       return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -52,15 +33,17 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
                                        lenp, ppos);
 }
 #else
-#define proc_mq_dointvec NULL
 #define proc_mq_dointvec_minmax NULL
 #endif
 
+static int msg_queues_limit_min = MIN_QUEUESMAX;
+static int msg_queues_limit_max = HARD_QUEUESMAX;
+
 static int msg_max_limit_min = MIN_MSGMAX;
-static int msg_max_limit_max = MAX_MSGMAX;
+static int msg_max_limit_max = HARD_MSGMAX;
 
 static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
-static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
+static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
 
 static ctl_table mq_sysctls[] = {
        {
@@ -68,7 +51,9 @@ static ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_queues_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_queues_limit_min,
+               .extra2         = &msg_queues_limit_max,
        },
        {
                .procname       = "msg_max",
@@ -88,6 +73,24 @@ static ctl_table mq_sysctls[] = {
                .extra1         = &msg_maxsize_limit_min,
                .extra2         = &msg_maxsize_limit_max,
        },
+       {
+               .procname       = "msg_default",
+               .data           = &init_ipc_ns.mq_msg_default,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_max_limit_min,
+               .extra2         = &msg_max_limit_max,
+       },
+       {
+               .procname       = "msgsize_default",
+               .data           = &init_ipc_ns.mq_msgsize_default,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_maxsize_limit_min,
+               .extra2         = &msg_maxsize_limit_max,
+       },
        {}
 };
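The two new entries expose per-namespace defaults that apply when a queue is created without explicit attributes. A hedged userspace sketch of tuning one of them, assuming the usual fs.mqueue sysctl directory under /proc:

	/* Userspace sketch, not part of the patch; the path assumes the
	 * standard fs.mqueue sysctl hierarchy. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/fs/mqueue/msg_default", "w");

		if (!f)
			return 1;
		fprintf(f, "64\n");	/* queues created without attrs get 64 slots */
		fclose(f);
		return 0;
	}
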
 
index a2757d4ab7734dd7f177e695f66f454964e8222d..8ce57691e7b60994d9cc97620b7550c603df5391 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mqueue.h>
 #include <linux/msg.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/netlink.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #define STATE_PENDING  1
 #define STATE_READY    2
 
+struct posix_msg_tree_node {
+       struct rb_node          rb_node;
+       struct list_head        msg_list;
+       int                     priority;
+};
+
 struct ext_wait_queue {                /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
@@ -61,7 +68,8 @@ struct mqueue_inode_info {
        struct inode vfs_inode;
        wait_queue_head_t wait_q;
 
-       struct msg_msg **messages;
+       struct rb_root msg_tree;
+       struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;
 
        struct sigevent notify;
@@ -109,6 +117,103 @@ static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
        return ns;
 }
 
+/* Auxiliary functions to manipulate messages' list */
+static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+{
+       struct rb_node **p, *parent = NULL;
+       struct posix_msg_tree_node *leaf;
+
+       p = &info->msg_tree.rb_node;
+       while (*p) {
+               parent = *p;
+               leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+
+               if (likely(leaf->priority == msg->m_type))
+                       goto insert_msg;
+               else if (msg->m_type < leaf->priority)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+       if (info->node_cache) {
+               leaf = info->node_cache;
+               info->node_cache = NULL;
+       } else {
+               leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
+               if (!leaf)
+                       return -ENOMEM;
+               rb_init_node(&leaf->rb_node);
+               INIT_LIST_HEAD(&leaf->msg_list);
+               info->qsize += sizeof(*leaf);
+       }
+       leaf->priority = msg->m_type;
+       rb_link_node(&leaf->rb_node, parent, p);
+       rb_insert_color(&leaf->rb_node, &info->msg_tree);
+insert_msg:
+       info->attr.mq_curmsgs++;
+       info->qsize += msg->m_ts;
+       list_add_tail(&msg->m_list, &leaf->msg_list);
+       return 0;
+}
+
+static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
+{
+       struct rb_node **p, *parent = NULL;
+       struct posix_msg_tree_node *leaf;
+       struct msg_msg *msg;
+
+try_again:
+       p = &info->msg_tree.rb_node;
+       while (*p) {
+               parent = *p;
+               /*
+                * During insert, low priorities go to the left and high to the
+                * right.  On receive, we want the highest priorities first, so
+                * walk all the way to the right.
+                */
+               p = &(*p)->rb_right;
+       }
+       if (!parent) {
+               if (info->attr.mq_curmsgs) {
+                       pr_warn_once("Inconsistency in POSIX message queue, "
+                                    "no tree element, but supposedly messages "
+                                    "should exist!\n");
+                       info->attr.mq_curmsgs = 0;
+               }
+               return NULL;
+       }
+       leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+       if (unlikely(list_empty(&leaf->msg_list))) {
+               pr_warn_once("Inconsistency in POSIX message queue, "
+                            "empty leaf node but we haven't implemented "
+                            "lazy leaf delete!\n");
+               rb_erase(&leaf->rb_node, &info->msg_tree);
+               if (info->node_cache) {
+                       info->qsize -= sizeof(*leaf);
+                       kfree(leaf);
+               } else {
+                       info->node_cache = leaf;
+               }
+               goto try_again;
+       } else {
+               msg = list_first_entry(&leaf->msg_list,
+                                      struct msg_msg, m_list);
+               list_del(&msg->m_list);
+               if (list_empty(&leaf->msg_list)) {
+                       rb_erase(&leaf->rb_node, &info->msg_tree);
+                       if (info->node_cache) {
+                               info->qsize -= sizeof(*leaf);
+                               kfree(leaf);
+                       } else {
+                               info->node_cache = leaf;
+                       }
+               }
+       }
+       info->attr.mq_curmsgs--;
+       info->qsize -= msg->m_ts;
+       return msg;
+}
+
 static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
@@ -129,7 +234,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
-               unsigned long mq_bytes, mq_msg_tblsz;
+               unsigned long mq_bytes, mq_treesize;
 
                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
@@ -143,20 +248,36 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
+               info->msg_tree = RB_ROOT;
+               info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
-               info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
-               info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
+               info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+                                          ipc_ns->mq_msg_default);
+               info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+                                           ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
-               mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
-               info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
-               if (!info->messages)
-                       goto out_inode;
+               /*
+                * We used to allocate a static array of pointers and account
+                * the size of that array as well as one msg_msg struct per
+                * possible message into the queue size. That's no longer
+                * accurate as the queue is now an rbtree and will grow and
+                * shrink depending on usage patterns.  We can, however, still
+                * account one msg_msg struct per message, but the nodes are
+                * allocated depending on priority usage, and most programs
+                * only use one, or a handful, of priorities.  However, since
+                * this is pinned memory, we need to assume worst case, so
+                * that means the min(mq_maxmsg, max_priorities) * struct
+                * posix_msg_tree_node.
+                */
+               mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+                       min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+                       sizeof(struct posix_msg_tree_node);
 
-               mq_bytes = (mq_msg_tblsz +
-                       (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
 
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
@@ -247,9 +368,9 @@ static void mqueue_evict_inode(struct inode *inode)
 {
        struct mqueue_inode_info *info;
        struct user_struct *user;
-       unsigned long mq_bytes;
-       int i;
+       unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
+       struct msg_msg *msg;
 
        clear_inode(inode);
 
@@ -259,14 +380,19 @@ static void mqueue_evict_inode(struct inode *inode)
        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
-       for (i = 0; i < info->attr.mq_curmsgs; i++)
-               free_msg(info->messages[i]);
-       kfree(info->messages);
+       while ((msg = msg_get(info)) != NULL)
+               free_msg(msg);
+       kfree(info->node_cache);
        spin_unlock(&info->lock);
 
        /* Total amount of bytes accounted for the mqueue */
-       mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
-           + info->attr.mq_msgsize);
+       mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+               min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+               sizeof(struct posix_msg_tree_node);
+
+       mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                 info->attr.mq_msgsize);
+
        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
@@ -300,8 +426,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
                error = -EACCES;
                goto out_unlock;
        }
-       if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
-                       !capable(CAP_SYS_RESOURCE)) {
+       if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+           (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+            !capable(CAP_SYS_RESOURCE))) {
                error = -ENOSPC;
                goto out_unlock;
        }
@@ -485,26 +612,6 @@ static struct ext_wait_queue *wq_get_first_waiter(
        return list_entry(ptr, struct ext_wait_queue, list);
 }
 
-/* Auxiliary functions to manipulate messages' list */
-static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
-{
-       int k;
-
-       k = info->attr.mq_curmsgs - 1;
-       while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
-               info->messages[k + 1] = info->messages[k];
-               k--;
-       }
-       info->attr.mq_curmsgs++;
-       info->qsize += ptr->m_ts;
-       info->messages[k + 1] = ptr;
-}
-
-static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
-{
-       info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
-       return info->messages[info->attr.mq_curmsgs];
-}
 
 static inline void set_cookie(struct sk_buff *skb, char code)
 {
@@ -585,24 +692,30 @@ static void remove_notification(struct mqueue_inode_info *info)
 
 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 {
+       int mq_treesize;
+       unsigned long total_size;
+
        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
-               return 0;
+               return -EINVAL;
        if (capable(CAP_SYS_RESOURCE)) {
-               if (attr->mq_maxmsg > HARD_MSGMAX)
-                       return 0;
+               if (attr->mq_maxmsg > HARD_MSGMAX ||
+                   attr->mq_msgsize > HARD_MSGSIZEMAX)
+                       return -EINVAL;
        } else {
                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
                                attr->mq_msgsize > ipc_ns->mq_msgsize_max)
-                       return 0;
+                       return -EINVAL;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
-               return 0;
-       if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
-           + sizeof (struct msg_msg *))) <
-           (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
-               return 0;
-       return 1;
+               return -EOVERFLOW;
+       mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
+               min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
+               sizeof(struct posix_msg_tree_node);
+       total_size = attr->mq_maxmsg * attr->mq_msgsize;
+       if (total_size + mq_treesize < total_size)
+               return -EOVERFLOW;
+       return 0;
 }
 
 /*
@@ -617,12 +730,21 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
        int ret;
 
        if (attr) {
-               if (!mq_attr_ok(ipc_ns, attr)) {
-                       ret = -EINVAL;
+               ret = mq_attr_ok(ipc_ns, attr);
+               if (ret)
                        goto out;
-               }
                /* store for use during create */
                dentry->d_fsdata = attr;
+       } else {
+               struct mq_attr def_attr;
+
+               def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+                                        ipc_ns->mq_msg_default);
+               def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+                                         ipc_ns->mq_msgsize_default);
+               ret = mq_attr_ok(ipc_ns, &def_attr);
+               if (ret)
+                       goto out;
        }
 
        mode &= ~current_umask();
@@ -837,7 +959,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
                wake_up_interruptible(&info->wait_q);
                return;
        }
-       msg_insert(sender->msg, info);
+       if (msg_insert(sender->msg, info))
+               return;
        list_del(&sender->list);
        sender->state = STATE_PENDING;
        wake_up_process(sender->task);
@@ -857,7 +980,8 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
-       int ret;
+       struct posix_msg_tree_node *new_leaf = NULL;
+       int ret = 0;
 
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -905,34 +1029,60 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;
 
+       /*
+        * msg_insert really wants us to have a valid, spare node struct so
+        * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+        * fall back to that if necessary.
+        */
+       if (!info->node_cache)
+               new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
        spin_lock(&info->lock);
 
+       if (!info->node_cache && new_leaf) {
+               /* Save our speculative allocation into the cache */
+               rb_init_node(&new_leaf->rb_node);
+               INIT_LIST_HEAD(&new_leaf->msg_list);
+               info->node_cache = new_leaf;
+               info->qsize += sizeof(*new_leaf);
+               new_leaf = NULL;
+       } else {
+               kfree(new_leaf);
+       }
+
        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (filp->f_flags & O_NONBLOCK) {
-                       spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
+                       /*
+                        * wq_sleep must be called with info->lock held, and
+                        * returns with the lock released
+                        */
+                       goto out_free;
                }
-               if (ret < 0)
-                       free_msg(msg_ptr);
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
-                       msg_insert(msg_ptr, info);
+                       ret = msg_insert(msg_ptr, info);
+                       if (ret)
+                               goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
-               spin_unlock(&info->lock);
-               ret = 0;
        }
+out_unlock:
+       spin_unlock(&info->lock);
+out_free:
+       if (ret)
+               free_msg(msg_ptr);
 out_fput:
        fput(filp);
 out:
@@ -951,6 +1101,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
+       struct posix_msg_tree_node *new_leaf = NULL;
 
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -986,7 +1137,26 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                goto out_fput;
        }
 
+       /*
+        * msg_insert really wants us to have a valid, spare node struct so
+        * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+        * fall back to that if necessary.
+        */
+       if (!info->node_cache)
+               new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
        spin_lock(&info->lock);
+
+       if (!info->node_cache && new_leaf) {
+               /* Save our speculative allocation into the cache */
+               rb_init_node(&new_leaf->rb_node);
+               INIT_LIST_HEAD(&new_leaf->msg_list);
+               info->node_cache = new_leaf;
+               info->qsize += sizeof(*new_leaf);
+       } else {
+               kfree(new_leaf);
+       }
+
        if (info->attr.mq_curmsgs == 0) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
@@ -1251,6 +1421,8 @@ int mq_init_ns(struct ipc_namespace *ns)
        ns->mq_queues_max    = DFLT_QUEUESMAX;
        ns->mq_msg_max       = DFLT_MSGMAX;
        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
+       ns->mq_msg_default   = DFLT_MSG;
+       ns->mq_msgsize_default  = DFLT_MSGSIZE;
 
        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
        if (IS_ERR(ns->mq_mnt)) {
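The rework above stores queued messages in an rbtree keyed by priority (walking all the way right on receive), caches one spare tree node to avoid GFP_ATOMIC allocations under the queue lock, and lets mq_open() without attributes fall back to the new namespace defaults. The userspace contract is unchanged; a hedged standalone example (link with -lrt) showing the priority ordering the new receive path preserves:

	/* Userspace sketch, not part of the patch: mq_receive() still returns
	 * the oldest message of the highest priority first. */
	#include <fcntl.h>
	#include <mqueue.h>
	#include <stdio.h>

	int main(void)
	{
		struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };
		char buf[64];
		unsigned int prio;
		mqd_t q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);

		if (q == (mqd_t)-1)
			return 1;
		mq_send(q, "low", 4, 1);
		mq_send(q, "high", 5, 10);
		mq_receive(q, buf, sizeof(buf), &prio);
		printf("%s %u\n", buf, prio);	/* prints "high 10" */
		mq_close(q);
		mq_unlink("/demo");
		return 0;
	}
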
index 406c5b208193373b979ce82bffe6617250ea64ed..41c1285d697a6f19a3c9b945691346c9b45ade47 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 }
 
+static long shm_fallocate(struct file *file, int mode, loff_t offset,
+                         loff_t len)
+{
+       struct shm_file_data *sfd = shm_file_data(file);
+
+       if (!sfd->file->f_op->fallocate)
+               return -EOPNOTSUPP;
+       return sfd->file->f_op->fallocate(file, mode, offset, len);
+}
+
 static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
@@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = {
        .get_unmapped_area      = shm_get_unmapped_area,
 #endif
        .llseek         = noop_llseek,
+       .fallocate      = shm_fallocate,
 };
 
 static const struct file_operations shm_file_operations_huge = {
@@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = {
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
+       .fallocate      = shm_fallocate,
 };
 
 int is_file_shm_hugepages(struct file *file)
@@ -1036,6 +1048,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;
 
+       err = security_mmap_file(file, prot, flags);
+       if (err)
+               goto out_fput;
+
        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
@@ -1050,7 +1066,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
                        goto invalid;
        }
                
-       user_addr = do_mmap (file, addr, size, prot, flags, 0);
+       user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
@@ -1058,6 +1074,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 invalid:
        up_write(&current->mm->mmap_sem);
 
+out_fput:
        fput(file);
 
 out_nattch:
index 6c07f30fa9b7e678e23b2038474a5b508b7fa661..c0cc67ad764ceddbe9f226ee1bfb90c4055f19ff 100644 (file)
@@ -5,12 +5,12 @@
 obj-y     = fork.o exec_domain.o panic.o printk.o \
            cpu.o exit.o itimer.o time.o softirq.o resource.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
-           signal.o sys.o kmod.o workqueue.o pid.o \
+           signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
            rcupdate.o extable.o params.o posix-timers.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o cred.o \
-           async.o range.o groups.o
+           async.o range.o groups.o lglock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -25,6 +25,9 @@ endif
 obj-y += sched/
 obj-y += power/
 
+ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
+obj-$(CONFIG_X86) += kcmp.o
+endif
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
index 0f3527d6184a1597fb81f00a878df15eeab0ecf8..b303dfc7dce0703299c3e8840e91d101497495ec 100644 (file)
@@ -255,12 +255,17 @@ int cgroup_lock_is_held(void)
 
 EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
 
+static int css_unbias_refcnt(int refcnt)
+{
+       return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
+}
+
 /* the current nr of refs, always >= 0 whether @css is deactivated or not */
 static int css_refcnt(struct cgroup_subsys_state *css)
 {
        int v = atomic_read(&css->refcnt);
 
-       return v >= 0 ? v : v - CSS_DEACT_BIAS;
+       return css_unbias_refcnt(v);
 }
 
 /* convenient tests for these bits */
@@ -3878,8 +3883,12 @@ static void css_dput_fn(struct work_struct *work)
 {
        struct cgroup_subsys_state *css =
                container_of(work, struct cgroup_subsys_state, dput_work);
+       struct dentry *dentry = css->cgroup->dentry;
+       struct super_block *sb = dentry->d_sb;
 
-       dput(css->cgroup->dentry);
+       atomic_inc(&sb->s_active);
+       dput(dentry);
+       deactivate_super(sb);
 }
 
 static void init_cgroup_css(struct cgroup_subsys_state *css,
@@ -4971,10 +4980,12 @@ EXPORT_SYMBOL_GPL(__css_tryget);
 void __css_put(struct cgroup_subsys_state *css)
 {
        struct cgroup *cgrp = css->cgroup;
+       int v;
 
        rcu_read_lock();
-       atomic_dec(&css->refcnt);
-       switch (css_refcnt(css)) {
+       v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
+
+       switch (v) {
        case 1:
                if (notify_on_release(cgrp)) {
                        set_bit(CGRP_RELEASABLE, &cgrp->flags);
index 0e6353cf147abf51c4f760054971915063ece928..a4eb5227a19e482eaf2b821c94de845228381bdd 100644 (file)
 #include <linux/sched.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
+#include <linux/oom.h>
+#include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/bug.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
@@ -173,6 +176,47 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+/**
+ * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+ * @cpu: a CPU id
+ *
+ * This function walks all processes, finds a valid mm struct for each one and
+ * then clears a corresponding bit in mm's cpumask.  While this all sounds
+ * trivial, there are various non-obvious corner cases, which this function
+ * tries to solve in a safe manner.
+ *
+ * Also note that the function uses a somewhat relaxed locking scheme, so it may
+ * be called only for an already offlined CPU.
+ */
+void clear_tasks_mm_cpumask(int cpu)
+{
+       struct task_struct *p;
+
+       /*
+        * This function is called after the cpu is taken down and marked
+        * offline, so it's not like new tasks will ever get this cpu set in
+        * their mm mask. -- Peter Zijlstra
+        * Thus, we may use rcu_read_lock() here, instead of grabbing
+        * full-fledged tasklist_lock.
+        */
+       WARN_ON(cpu_online(cpu));
+       rcu_read_lock();
+       for_each_process(p) {
+               struct task_struct *t;
+
+               /*
+                * Main thread might exit, but other threads may still have
+                * a valid mm. Find one.
+                */
+               t = find_lock_task_mm(p);
+               if (!t)
+                       continue;
+               cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+               task_unlock(t);
+       }
+       rcu_read_unlock();
+}
+
 static inline void check_for_tasks(int cpu)
 {
        struct task_struct *p;
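A hedged sketch of where the new helper is intended to be called from: an architecture's CPU-teardown path, once the dying CPU has been marked offline (the WARN_ON above enforces this). The my_platform_cpu_die() name is hypothetical:

	/* Illustrative arch-side sketch only. */
	void my_platform_cpu_die(unsigned int cpu)
	{
		/* CPU is already offline here, so the relaxed locking is safe. */
		clear_tasks_mm_cpumask(cpu);

		/* ... arch specific: flush caches, power the core down ... */
	}
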
index 249152e15308c9d3a36922c2d10989a961e221d9..9656a3c36503dee343813149bbf1153bb6aea05a 100644 (file)
@@ -81,7 +81,7 @@ int cpu_pm_unregister_notifier(struct notifier_block *nb)
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
 
 /**
- * cpm_pm_enter - CPU low power entry notifier
+ * cpu_pm_enter - CPU low power entry notifier
  *
  * Notifies listeners that a single CPU is entering a low power state that may
  * cause some blocks in the same power domain as the cpu to reset.
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
  * Must be called on the affected CPU with interrupts disabled.  Platform is
  * responsible for ensuring that cpu_pm_enter is not called twice on the same
  * CPU before cpu_pm_exit is called. Notified drivers can include VFP
- * co-processor, interrupt controller and it's PM extensions, local CPU
+ * co-processor, interrupt controller and its PM extensions, local CPU
  * timers context save/restore which shouldn't be interrupted. Hence it
  * must be called with interrupts disabled.
  *
@@ -115,13 +115,13 @@ int cpu_pm_enter(void)
 EXPORT_SYMBOL_GPL(cpu_pm_enter);
 
 /**
- * cpm_pm_exit - CPU low power exit notifier
+ * cpu_pm_exit - CPU low power exit notifier
  *
  * Notifies listeners that a single CPU is exiting a low power state that may
  * have caused some blocks in the same power domain as the cpu to reset.
  *
  * Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
@@ -139,7 +139,7 @@ int cpu_pm_exit(void)
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
 
 /**
- * cpm_cluster_pm_enter - CPU cluster low power entry notifier
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
  *
  * Notifies listeners that all cpus in a power domain are entering a low power
  * state that may cause some blocks in the same power domain to reset.
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
  * Must be called after cpu_pm_enter has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Must be called with interrupts disabled.
@@ -174,7 +174,7 @@ int cpu_cluster_pm_enter(void)
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
 
 /**
- * cpm_cluster_pm_exit - CPU cluster low power exit notifier
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
  *
  * Notifies listeners that all cpus in a power domain are exiting form a
  * low power state that may have caused some blocks in the same power domain
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  * Must be called after cpu_pm_exit has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
index 430557ea488f3625243455afcdd6b2f9f481ac19..de728ac50d821b9f38340534a4ba6202137d55a2 100644 (file)
@@ -207,13 +207,6 @@ void exit_creds(struct task_struct *tsk)
        validate_creds(cred);
        alter_cred_subscribers(cred, -1);
        put_cred(cred);
-
-       cred = (struct cred *) tsk->replacement_session_keyring;
-       if (cred) {
-               tsk->replacement_session_keyring = NULL;
-               validate_creds(cred);
-               put_cred(cred);
-       }
 }
 
 /**
@@ -396,8 +389,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
        struct cred *new;
        int ret;
 
-       p->replacement_session_keyring = NULL;
-
        if (
 #ifdef CONFIG_KEYS
                !p->cred->thread_keyring &&
index 67b847dfa2bb64b58d30db51460f22243142ee2d..1f91413edb87d0c9b77efc4e84c9b2f9729e9b8c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kmsg_dump.h>
 #include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sysrq.h>
@@ -2040,8 +2041,15 @@ static int kdb_env(int argc, const char **argv)
  */
 static int kdb_dmesg(int argc, const char **argv)
 {
-       char *syslog_data[4], *start, *end, c = '\0', *p;
-       int diag, logging, logsize, lines = 0, adjust = 0, n;
+       int diag;
+       int logging;
+       int lines = 0;
+       int adjust = 0;
+       int n = 0;
+       int skip = 0;
+       struct kmsg_dumper dumper = { .active = 1 };
+       size_t len;
+       char buf[201];
 
        if (argc > 2)
                return KDB_ARGCOUNT;
@@ -2064,22 +2072,10 @@ static int kdb_dmesg(int argc, const char **argv)
                kdb_set(2, setargs);
        }
 
-       /* syslog_data[0,1] physical start, end+1.  syslog_data[2,3]
-        * logical start, end+1. */
-       kdb_syslog_data(syslog_data);
-       if (syslog_data[2] == syslog_data[3])
-               return 0;
-       logsize = syslog_data[1] - syslog_data[0];
-       start = syslog_data[2];
-       end = syslog_data[3];
-#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
-       for (n = 0, p = start; p < end; ++p) {
-               c = *KDB_WRAP(p);
-               if (c == '\n')
-                       ++n;
-       }
-       if (c != '\n')
-               ++n;
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
+               n++;
+
        if (lines < 0) {
                if (adjust >= n)
                        kdb_printf("buffer only contains %d lines, nothing "
@@ -2087,21 +2083,11 @@ static int kdb_dmesg(int argc, const char **argv)
                else if (adjust - lines >= n)
                        kdb_printf("buffer only contains %d lines, last %d "
                                   "lines printed\n", n, n - adjust);
-               if (adjust) {
-                       for (; start < end && adjust; ++start) {
-                               if (*KDB_WRAP(start) == '\n')
-                                       --adjust;
-                       }
-                       if (start < end)
-                               ++start;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               ++lines;
-               }
-               end = p;
+               skip = adjust;
+               lines = abs(lines);
        } else if (lines > 0) {
-               int skip = n - (adjust + lines);
+               skip = n - lines - adjust;
+               lines = abs(lines);
                if (adjust >= n) {
                        kdb_printf("buffer only contains %d lines, "
                                   "nothing printed\n", n);
@@ -2112,35 +2098,24 @@ static int kdb_dmesg(int argc, const char **argv)
                        kdb_printf("buffer only contains %d lines, first "
                                   "%d lines printed\n", n, lines);
                }
-               for (; start < end && skip; ++start) {
-                       if (*KDB_WRAP(start) == '\n')
-                               --skip;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               --lines;
-               }
-               end = p;
+       } else {
+               lines = n;
        }
-       /* Do a line at a time (max 200 chars) to reduce protocol overhead */
-       c = '\n';
-       while (start != end) {
-               char buf[201];
-               p = buf;
-               if (KDB_FLAG(CMD_INTERRUPT))
-                       return 0;
-               while (start < end && (c = *KDB_WRAP(start)) &&
-                      (p - buf) < sizeof(buf)-1) {
-                       ++start;
-                       *p++ = c;
-                       if (c == '\n')
-                               break;
+
+       if (skip >= n || skip < 0)
+               return 0;
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
+               if (skip) {
+                       skip--;
+                       continue;
                }
-               *p = '\0';
-               kdb_printf("%s", buf);
+               if (!lines--)
+                       break;
+
+               kdb_printf("%.*s\n", (int)len - 1, buf);
        }
-       if (c != '\n')
-               kdb_printf("\n");
 
        return 0;
 }
index 47c4e56e513ba72ec485afdaa2ad0e30bac6e30f..392ec6a25844c48f1242a649c0c58a1d22c0c783 100644 (file)
@@ -205,7 +205,6 @@ extern char kdb_grep_string[];
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
-extern void kdb_syslog_data(char *syslog_data[]);
 extern unsigned long kdb_task_state_string(const char *);
 extern char kdb_task_state_char (const struct task_struct *);
 extern unsigned long kdb_task_state(const struct task_struct *p,
index 5b06cbbf6931da0ab7c4e9f1b27a76f5418d64d8..d7d71d6ec97278cf60fa5b13e79a80bff27b6af2 100644 (file)
@@ -253,9 +253,9 @@ perf_cgroup_match(struct perf_event *event)
        return !event->cgrp || event->cgrp == cpuctx->cgrp;
 }
 
-static inline void perf_get_cgroup(struct perf_event *event)
+static inline bool perf_tryget_cgroup(struct perf_event *event)
 {
-       css_get(&event->cgrp->css);
+       return css_tryget(&event->cgrp->css);
 }
 
 static inline void perf_put_cgroup(struct perf_event *event)
@@ -484,7 +484,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
        event->cgrp = cgrp;
 
        /* must be done before we fput() the file */
-       perf_get_cgroup(event);
+       if (!perf_tryget_cgroup(event)) {
+               event->cgrp = NULL;
+               ret = -ENOENT;
+               goto out;
+       }
 
        /*
         * all events in a group must monitor
@@ -3181,7 +3185,6 @@ static void perf_event_for_each(struct perf_event *event,
        event = event->group_leader;
 
        perf_event_for_each_child(event, func);
-       func(event);
        list_for_each_entry(sibling, &event->sibling_list, group_entry)
                perf_event_for_each_child(sibling, func);
        mutex_unlock(&ctx->mutex);
index 910a0716e17ab4124ddd07f22d502e19ed9f30de..2f59cc334516a9b9e831bb54aa0375c999ae69b2 100644 (file)
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
+               /*
+                * If we are the last child process in a pid namespace to be
+                * reaped, notify the reaper sleeping zap_pid_ns_processes().
+                */
+               if (IS_ENABLED(CONFIG_PID_NS)) {
+                       struct task_struct *parent = p->real_parent;
+
+                       if ((task_active_pid_ns(parent)->child_reaper == parent) &&
+                           list_empty(&parent->children) &&
+                           (parent->flags & PF_EXITING))
+                               wake_up_process(parent);
+               }
        }
        list_del_rcu(&p->thread_group);
 }
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk)
        mm_release(tsk, mm);
        if (!mm)
                return;
+       sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 
                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
-               /*
-                * We can not clear ->child_reaper or leave it alone.
-                * There may by stealth EXIT_DEAD tasks on ->children,
-                * forget_original_parent() must move them somewhere.
-                */
-               pid_ns->child_reaper = init_pid_ns.child_reaper;
        } else if (father->signal->has_child_subreaper) {
                struct task_struct *reaper;
 
@@ -884,9 +891,9 @@ static void check_stack_usage(void)
 
        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
-               printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
-                               "left\n",
-                               current->comm, free);
+               printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+                               "%lu bytes left\n",
+                               current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
@@ -946,12 +953,13 @@ void do_exit(long code)
        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
-        * an exiting task cleaning up the robust pi futexes.
+        * an exiting task cleaning up the robust pi futexes, and in
+        * task_work_add() to avoid the race with exit_task_work().
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);
 
-       exit_irq_thread();
+       exit_task_work(tsk);
 
        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -1214,7 +1222,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
-       uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid);
+       uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct siginfo __user *infop;
 
        if (!likely(wo->wo_flags & WEXITED))
index 31a32c7dd16953c8167914a3ec4f84fce23f3c20..f00e319d8376289f0273fc1d301f4c1ebb132c86 100644 (file)
@@ -304,12 +304,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        }
 
        err = arch_dup_task_struct(tsk, orig);
-       if (err)
-               goto out;
 
+       /*
+        * We defer looking at err, because we will need this setup
+        * for the clean up path to work correctly.
+        */
        tsk->stack = ti;
-
        setup_thread_stack(tsk, orig);
+
+       if (err)
+               goto out;
+
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        stackend = end_of_stack(tsk);
@@ -787,9 +792,6 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);
 
-       if (tsk->vfork_done)
-               complete_vfork_done(tsk);
-
        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
@@ -810,6 +812,13 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
                }
                tsk->clear_child_tid = NULL;
        }
+
+       /*
+        * All done, finally we can wake up parent and return this mm to him.
+        * Also kthread_stop() uses this completion for synchronization.
+        */
+       if (tsk->vfork_done)
+               complete_vfork_done(tsk);
 }
 
 /*
@@ -1411,6 +1420,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
+       INIT_HLIST_HEAD(&p->task_works);
 
        /* Now that the task is set up, run cgroup callbacks if
         * necessary. We need to run them before the task is visible
index ae34bf51682b4a204de93f62943055350cd5c4d0..6db7a5ed52b58727d33293853053c0fee1d049fe 100644 (file)
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+       return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-       struct timespec realtime_offset, xtim, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       /* Optimized out for !HIGH_RES */
-       get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-       set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-       /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[HRTIMER_BASE_REALTIME].offset =
-               timespec_to_ktime(realtime_offset);
-       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-               timespec_to_ktime(sleep);
-
+       hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
 }
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
                base->clock_base[i].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
-
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
 }
 
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       cpu_base->clock_was_set = 1;
+       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
 retry:
        expires_next.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
         */
-       now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
@@ -1343,6 +1356,7 @@ retry:
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       if (cpu_base->clock_was_set) {
+               cpu_base->clock_was_set = 0;
+               clock_was_set();
+       }
+
        hrtimer_peek_ahead_timers();
 }
 
index fc275e4f629b941ef0905a77bc4b88b8659b3183..eebd6d5cfb44ce626f669029750112a0cad15525 100644 (file)
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        action = desc->action;
-       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        handle_irq_event(desc);
 
index 8e5c56b3b7d9c5f70c49d48f7082636b1b3bdf89..001fa5bab4902dcf403384ded269841c09d3ca6a 100644 (file)
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+                              const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
index bb32326afe8796be6ebbf240fbd3ef4bb1fc8cbe..8c548232ba39daf0f7320d64f19d85775000201a 100644 (file)
@@ -7,6 +7,8 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+#define pr_fmt(fmt) "genirq: " fmt
+
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -139,6 +142,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                       bool force)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+       int ret;
+
+       ret = chip->irq_set_affinity(data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(data->affinity, mask);
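+               /* fall through - both cases also update the thread affinity */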
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+               ret = 0;
+       }
+
+       return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = chip->irq_set_affinity(data, mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(data->affinity, mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-                       ret = 0;
-               }
+               ret = irq_do_set_affinity(data, mask, false);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -280,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-       struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
-       int ret, node = desc->irq_data.node;
+       int node = desc->irq_data.node;
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
@@ -308,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
                if (cpumask_intersects(mask, nodemask))
                        cpumask_and(mask, mask, nodemask);
        }
-       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-       switch (ret) {
-       case IRQ_SET_MASK_OK:
-               cpumask_copy(desc->irq_data.affinity, mask);
-       case IRQ_SET_MASK_OK_NOCOPY:
-               irq_set_thread_affinity(desc);
-       }
+       irq_do_set_affinity(&desc->irq_data, mask, false);
        return 0;
 }
 #else
@@ -565,7 +573,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
-               pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+               pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }
@@ -600,7 +608,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                ret = 0;
                break;
        default:
-               pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+               pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
@@ -773,11 +781,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
                wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+       struct task_struct *tsk = current;
+       struct irq_desc *desc;
+       struct irqaction *action;
+
+       if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+               return;
+
+       action = kthread_data(tsk);
+
+       pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+              tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+       desc = irq_to_desc(action->irq);
+       /*
+        * If IRQTF_RUNTHREAD is set, we need to decrement
+        * desc->threads_active and wake possible waiters.
+        */
+       if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               wake_threads_waitq(desc);
+
+       /* Prevent a stale desc->threads_oneshot */
+       irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+       struct task_work on_exit_work;
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
@@ -793,7 +829,9 @@ static int irq_thread(void *data)
                handler_fn = irq_thread_fn;
 
        sched_setscheduler(current, SCHED_FIFO, &param);
-       current->irq_thread = 1;
+
+       init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+       task_work_add(current, &on_exit_work, false);
 
        while (!irq_wait_for_interrupt(action)) {
                irqreturn_t action_ret;
@@ -815,44 +853,11 @@ static int irq_thread(void *data)
         * cannot touch the oneshot mask at this point anymore as
         * __setup_irq() might have given out current's thread_mask
         * again.
-        *
-        * Clear irq_thread. Otherwise exit_irq_thread() would make
-        * fuzz about an active irq thread going into nirvana.
         */
-       current->irq_thread = 0;
+       task_work_cancel(current, irq_thread_dtor);
        return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-       struct task_struct *tsk = current;
-       struct irq_desc *desc;
-       struct irqaction *action;
-
-       if (!tsk->irq_thread)
-               return;
-
-       action = kthread_data(tsk);
-
-       pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-              tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-       desc = irq_to_desc(action->irq);
-
-       /*
-        * If IRQTF_RUNTHREAD is set, we need to decrement
-        * desc->threads_active and wake possible waiters.
-        */
-       if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-               wake_threads_waitq(desc);
-
-       /* Prevent a stale desc->threads_oneshot */
-       irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
        if (!force_irqthreads)
@@ -1044,7 +1049,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * has. The type flags are unreliable as the
                 * underlying chip implementation can override them.
                 */
-               pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+               pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
                       irq);
                ret = -EINVAL;
                goto out_mask;
@@ -1095,7 +1100,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
-                       pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+                       pr_warning("irq %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }
 
@@ -1133,7 +1138,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 mismatch:
        if (!(new->flags & IRQF_PROBE_SHARED)) {
-               pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+               pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
                       irq, new->flags, new->name, old->flags, old->name);
 #ifdef CONFIG_DEBUG_SHIRQ
                dump_stack();
index c3c89751b327c9cf257c870d046973107be810da..ca3f4aaff707db1d2aa28bee7f388be67a3317b5 100644 (file)
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                  < nr_cpu_ids)) {
-               int ret = chip->irq_set_affinity(&desc->irq_data,
-                                                desc->pending_mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-               }
-       }
+       if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+               irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
        cpumask_clear(desc->pending_mask);
 }
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
new file mode 100644 (file)
index 0000000..30b7b22
--- /dev/null
@@ -0,0 +1,196 @@
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/kcmp.h>
+
+#include <asm/unistd.h>
+
+/*
+ * We don't expose the real in-memory order of objects for security reasons.
+ * Still, the comparison results should be suitable for sorting, so we
+ * obfuscate the kernel pointer values and compare the products instead.
+ *
+ * The obfuscation is done in two steps. First we XOR the kernel pointer with
+ * a random value, which moves the pointer to a new position in a reordered
+ * space. Second, we multiply the XOR product by a large odd random number to
+ * permute its bits even more (the odd multiplier guarantees that the product
+ * is unique even after the high bits are truncated, since any odd number is
+ * relatively prime to 2^n).
+ *
+ * Note also that the obfuscation itself is invisible to userspace and, if
+ * needed, it can be changed to an alternate scheme.
+ */
+static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
+
+static long kptr_obfuscate(long v, int type)
+{
+       return (v ^ cookies[type][0]) * cookies[type][1];
+}
+
+/*
+ * 0 - equal, i.e. v1 = v2
+ * 1 - less than, i.e. v1 < v2
+ * 2 - greater than, i.e. v1 > v2
+ * 3 - not equal but ordering unavailable (reserved for future)
+ */
+static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+{
+       long ret;
+
+       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+
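+       /* Fold the signed difference into the 0/1/2 encoding documented above */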
+       return (ret < 0) | ((ret > 0) << 1);
+}
+
+/* The caller must have pinned the task */
+static struct file *
+get_file_raw_ptr(struct task_struct *task, unsigned int idx)
+{
+       struct file *file = NULL;
+
+       task_lock(task);
+       rcu_read_lock();
+
+       if (task->files)
+               file = fcheck_files(task->files, idx);
+
+       rcu_read_unlock();
+       task_unlock(task);
+
+       return file;
+}
+
+static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
+{
+       if (likely(m2 != m1))
+               mutex_unlock(m2);
+       mutex_unlock(m1);
+}
+
+static int kcmp_lock(struct mutex *m1, struct mutex *m2)
+{
+       int err;
+
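+       /*
+        * Take the two mutexes in a fixed (pointer) order so that two
+        * concurrent kcmp() calls cannot deadlock against each other.
+        */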
+       if (m2 > m1)
+               swap(m1, m2);
+
+       err = mutex_lock_killable(m1);
+       if (!err && likely(m1 != m2)) {
+               err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
+               if (err)
+                       mutex_unlock(m1);
+       }
+
+       return err;
+}
+
+SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+               unsigned long, idx1, unsigned long, idx2)
+{
+       struct task_struct *task1, *task2;
+       int ret;
+
+       rcu_read_lock();
+
+       /*
+        * Tasks are looked up in caller's PID namespace only.
+        */
+       task1 = find_task_by_vpid(pid1);
+       task2 = find_task_by_vpid(pid2);
+       if (!task1 || !task2)
+               goto err_no_task;
+
+       get_task_struct(task1);
+       get_task_struct(task2);
+
+       rcu_read_unlock();
+
+       /*
+        * One should have enough rights to inspect task details.
+        */
+       ret = kcmp_lock(&task1->signal->cred_guard_mutex,
+                       &task2->signal->cred_guard_mutex);
+       if (ret)
+               goto err;
+       if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+           !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+               ret = -EPERM;
+               goto err_unlock;
+       }
+
+       switch (type) {
+       case KCMP_FILE: {
+               struct file *filp1, *filp2;
+
+               filp1 = get_file_raw_ptr(task1, idx1);
+               filp2 = get_file_raw_ptr(task2, idx2);
+
+               if (filp1 && filp2)
+                       ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
+               else
+                       ret = -EBADF;
+               break;
+       }
+       case KCMP_VM:
+               ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
+               break;
+       case KCMP_FILES:
+               ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
+               break;
+       case KCMP_FS:
+               ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
+               break;
+       case KCMP_SIGHAND:
+               ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
+               break;
+       case KCMP_IO:
+               ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
+               break;
+       case KCMP_SYSVSEM:
+#ifdef CONFIG_SYSVIPC
+               ret = kcmp_ptr(task1->sysvsem.undo_list,
+                              task2->sysvsem.undo_list,
+                              KCMP_SYSVSEM);
+#else
+               ret = -EOPNOTSUPP;
+#endif
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+err_unlock:
+       kcmp_unlock(&task1->signal->cred_guard_mutex,
+                   &task2->signal->cred_guard_mutex);
+err:
+       put_task_struct(task1);
+       put_task_struct(task2);
+
+       return ret;
+
+err_no_task:
+       rcu_read_unlock();
+       return -ESRCH;
+}
+
+static __init int kcmp_cookies_init(void)
+{
+       int i;
+
+       get_random_bytes(cookies, sizeof(cookies));
+
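+       /*
+        * Force the top and bottom bits of every multiplier on, so each one
+        * is guaranteed to be a large odd number.
+        */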
+       for (i = 0; i < KCMP_TYPES; i++)
+               cookies[i][1] |= (~(~0UL >>  1) | 1);
+
+       return 0;
+}
+arch_initcall(kcmp_cookies_init);
index 05698a7415fea66ea604b87959bde93f5b2673a3..ff2c7cb86d770aaf51712e330dc0f1e8a72a26e6 100644 (file)
@@ -221,13 +221,12 @@ fail:
        return 0;
 }
 
-void call_usermodehelper_freeinfo(struct subprocess_info *info)
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
 }
-EXPORT_SYMBOL(call_usermodehelper_freeinfo);
 
 static void umh_complete(struct subprocess_info *sub_info)
 {
@@ -410,7 +409,7 @@ EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
  * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
- * depth: New value to assign to usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
  *
  * Change the value of usermodehelper_disabled (under umhelper_sem locked for
  * writing) and wakeup tasks waiting for it to change.
@@ -479,6 +478,7 @@ static void helper_unlock(void)
  * structure.  This should be passed to call_usermodehelper_exec to
  * exec the process and free the structure.
  */
+static
 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
                                                  char **envp, gfp_t gfp_mask)
 {
@@ -494,7 +494,6 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
   out:
        return sub_info;
 }
-EXPORT_SYMBOL(call_usermodehelper_setup);
 
 /**
  * call_usermodehelper_setfns - set a cleanup/init function
@@ -512,6 +511,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * Function must be runnable in either a process context or the
  * context in which call_usermodehelper_exec is called.
  */
+static
 void call_usermodehelper_setfns(struct subprocess_info *info,
                    int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
@@ -521,7 +521,6 @@ void call_usermodehelper_setfns(struct subprocess_info *info,
        info->init = init;
        info->data = data;
 }
-EXPORT_SYMBOL(call_usermodehelper_setfns);
 
 /**
  * call_usermodehelper_exec - start a usermode application
@@ -535,6 +534,7 @@ EXPORT_SYMBOL(call_usermodehelper_setfns);
  * asynchronously if wait is not set, and runs as a child of keventd.
  * (ie. it runs with full root capabilities).
  */
+static
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
        DECLARE_COMPLETION_ONSTACK(done);
@@ -576,7 +576,25 @@ unlock:
        helper_unlock();
        return retval;
 }
-EXPORT_SYMBOL(call_usermodehelper_exec);
+
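+/*
+ * call_usermodehelper_fns - allocate a subprocess_info, attach the optional
+ * init/cleanup callbacks and data, then start the usermode helper.
+ *
+ * Returns -ENOMEM if the subprocess_info cannot be allocated, otherwise the
+ * result of call_usermodehelper_exec().
+ */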
+int call_usermodehelper_fns(
+       char *path, char **argv, char **envp, int wait,
+       int (*init)(struct subprocess_info *info, struct cred *new),
+       void (*cleanup)(struct subprocess_info *), void *data)
+{
+       struct subprocess_info *info;
+       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+
+       info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+       if (info == NULL)
+               return -ENOMEM;
+
+       call_usermodehelper_setfns(info, init, cleanup, data);
+
+       return call_usermodehelper_exec(info, wait);
+}
+EXPORT_SYMBOL(call_usermodehelper_fns);
 
 static int proc_cap_handler(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644 (file)
index 0000000..6535a66
--- /dev/null
@@ -0,0 +1,89 @@
+/* See include/linux/lglock.h for description */
+#include <linux/module.h>
+#include <linux/lglock.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+
+/*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there).
+ * It could be added, though; just undo lg_lock_init().
+ */
+
+void lg_lock_init(struct lglock *lg, char *name)
+{
+       LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+}
+EXPORT_SYMBOL(lg_lock_init);
+
+void lg_local_lock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock);
+
+void lg_local_unlock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock);
+
+void lg_local_lock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock_cpu);
+
+void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock_cpu);
+
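+/*
+ * Take every possible CPU's per-cpu spinlock in turn; this excludes all
+ * lg_local_lock() holders until lg_global_unlock() is called.
+ */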
+void lg_global_lock(struct lglock *lg)
+{
+       int i;
+
+       preempt_disable();
+       rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_lock(lock);
+       }
+}
+EXPORT_SYMBOL(lg_global_lock);
+
+void lg_global_unlock(struct lglock *lg)
+{
+       int i;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_unlock(lock);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_global_unlock);
index 8ed89a175d79376600dfbfd777f06ed3c24665a5..d2a5f4ecc6ddd2ebc2da68764da646c7b96d9fc9 100644 (file)
@@ -27,7 +27,7 @@
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
-int panic_on_oops;
+int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...)
         */
        crash_kexec(NULL);
 
-       kmsg_dump(KMSG_DUMP_PANIC);
-
        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...)
         */
        smp_send_stop();
 
+       kmsg_dump(KMSG_DUMP_PANIC);
+
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
        bust_spinlocks(0);
index 57bc1fd35b3cbe6bffdbfe71af5f13fc00648b81..b3c7fd5542500ab13940814211692bbe3b7ef8d3 100644 (file)
@@ -149,7 +149,12 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
        int nr;
        int rc;
-       struct task_struct *task;
+       struct task_struct *task, *me = current;
+
+       /* Ignore SIGCHLD so that any terminated children are autoreaped */
+       spin_lock_irq(&me->sighand->siglock);
+       me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
+       spin_unlock_irq(&me->sighand->siglock);
 
        /*
         * The last thread in the cgroup-init thread group is terminating.
@@ -179,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        }
        read_unlock(&tasklist_lock);
 
+       /* First, reap the EXIT_ZOMBIE children we may have. */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);
 
+       /*
+        * sys_wait4() above can't reap the TASK_DEAD children.
+        * Make sure they all go away, see __unhash_process().
+        */
+       for (;;) {
+               bool need_wait = false;
+
+               read_lock(&tasklist_lock);
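+               /*
+                * Set TASK_UNINTERRUPTIBLE while still holding tasklist_lock
+                * so the wake-up issued when the last child is unhashed (see
+                * __unhash_process()) cannot be missed.
+                */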
+               if (!list_empty(&current->children)) {
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       need_wait = true;
+               }
+               read_unlock(&tasklist_lock);
+
+               if (!need_wait)
+                       break;
+               schedule();
+       }
+
        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;
 
@@ -191,6 +216,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        return;
 }
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
 static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -218,8 +244,8 @@ static struct ctl_table pid_ns_ctl_table[] = {
        },
        { }
 };
-
 static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
+#endif /* CONFIG_CHECKPOINT_RESTORE */
 
 int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 {
@@ -253,7 +279,10 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 static __init int pid_namespaces_init(void)
 {
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
        register_sysctl_paths(kern_path, pid_ns_ctl_table);
+#endif
        return 0;
 }
 
index 8b53db38a279e50a1fe3a8719752fe245201e7a0..238025f5472e42d745752f0c29d3534ab7b66285 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
-#include <scsi/scsi_scan.h>
 
 #include "power.h"
 
@@ -748,13 +747,6 @@ static int software_resume(void)
                        async_synchronize_full();
                }
 
-               /*
-                * We can't depend on SCSI devices being available after loading
-                * one of their modules until scsi_complete_async_scans() is
-                * called and the resume device usually is a SCSI one.
-                */
-               scsi_complete_async_scans();
-
                swsusp_resume_device = name_to_dev_t(resume_file);
                if (!swsusp_resume_device) {
                        error = -ENODEV;
index 91b0fd021a95d08eef106b3e0b86a41e3a135055..4ed81e74f86fbb2a9d7b0ab8e7d105d0944763c0 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                 * appear.
                 */
                wait_for_device_probe();
-               scsi_complete_async_scans();
 
                data->swap = -1;
                data->mode = O_WRONLY;
index 32462d2b364ae68280b7a8c773e2f25c119e5091..ac4bc9e79465ed6cf416d9694f8ddc463e2e77ab 100644 (file)
@@ -193,12 +193,21 @@ static int console_may_schedule;
  * separated by ',', and find the message after the ';' character.
  */
 
+enum log_flags {
+       LOG_NOCONS      = 1,    /* already flushed, do not print to console */
+       LOG_NEWLINE     = 2,    /* text ended with a newline */
+       LOG_PREFIX      = 4,    /* text started with a prefix */
+       LOG_CONT        = 8,    /* text is a fragment of a continuation line */
+};
+
 struct log {
        u64 ts_nsec;            /* timestamp in nanoseconds */
        u16 len;                /* length of entire record */
        u16 text_len;           /* length of text buffer */
        u16 dict_len;           /* length of dictionary buffer */
-       u16 level;              /* syslog level + facility */
+       u8 facility;            /* syslog facility */
+       u8 flags:5;             /* internal record flags */
+       u8 level:3;             /* syslog level */
 };
 
 /*
@@ -210,6 +219,8 @@ static DEFINE_RAW_SPINLOCK(logbuf_lock);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
+static enum log_flags syslog_prev;
+static size_t syslog_partial;
 
 /* index and sequence number of the first record stored in the buffer */
 static u64 log_first_seq;
@@ -227,10 +238,10 @@ static u32 clear_idx;
 #define LOG_LINE_MAX 1024
 
 /* record buffer */
-#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
 #else
-#define LOG_ALIGN 8
+#define LOG_ALIGN __alignof__(struct log)
 #endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -286,6 +297,7 @@ static u32 log_next(u32 idx)
 
 /* insert record into the buffer, discard old ones, update heads */
 static void log_store(int facility, int level,
+                     enum log_flags flags, u64 ts_nsec,
                      const char *dict, u16 dict_len,
                      const char *text, u16 text_len)
 {
@@ -329,8 +341,13 @@ static void log_store(int facility, int level,
        msg->text_len = text_len;
        memcpy(log_dict(msg), dict, dict_len);
        msg->dict_len = dict_len;
-       msg->level = (facility << 3) | (level & 7);
-       msg->ts_nsec = local_clock();
+       msg->facility = facility;
+       msg->level = level & 7;
+       msg->flags = flags & 0x1f;
+       if (ts_nsec > 0)
+               msg->ts_nsec = ts_nsec;
+       else
+               msg->ts_nsec = local_clock();
        memset(log_dict(msg) + dict_len, 0, pad_len);
        msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
 
@@ -414,21 +431,23 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        if (!user)
                return -EBADF;
 
-       mutex_lock(&user->lock);
-       raw_spin_lock(&logbuf_lock);
+       ret = mutex_lock_interruptible(&user->lock);
+       if (ret)
+               return ret;
+       raw_spin_lock_irq(&logbuf_lock);
        while (user->seq == log_next_seq) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
-                       raw_spin_unlock(&logbuf_lock);
+                       raw_spin_unlock_irq(&logbuf_lock);
                        goto out;
                }
 
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                ret = wait_event_interruptible(log_wait,
                                               user->seq != log_next_seq);
                if (ret)
                        goto out;
-               raw_spin_lock(&logbuf_lock);
+               raw_spin_lock_irq(&logbuf_lock);
        }
 
        if (user->seq < log_first_seq) {
@@ -436,7 +455,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                user->idx = log_first_idx;
                user->seq = log_first_seq;
                ret = -EPIPE;
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                goto out;
        }
 
@@ -444,13 +463,13 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        ts_usec = msg->ts_nsec;
        do_div(ts_usec, 1000);
        len = sprintf(user->buf, "%u,%llu,%llu;",
-                     msg->level, user->seq, ts_usec);
+                     (msg->facility << 3) | msg->level, user->seq, ts_usec);
 
        /* escape non-printable characters */
        for (i = 0; i < msg->text_len; i++) {
                unsigned char c = log_text(msg)[i];
 
-               if (c < ' ' || c >= 128)
+               if (c < ' ' || c >= 127 || c == '\\')
                        len += sprintf(user->buf + len, "\\x%02x", c);
                else
                        user->buf[len++] = c;
@@ -474,7 +493,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                                continue;
                        }
 
-                       if (c < ' ' || c >= 128) {
+                       if (c < ' ' || c >= 127 || c == '\\') {
                                len += sprintf(user->buf + len, "\\x%02x", c);
                                continue;
                        }
@@ -486,7 +505,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
        user->idx = log_next(user->idx);
        user->seq++;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        if (len > count) {
                ret = -EINVAL;
@@ -513,7 +532,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        if (offset)
                return -ESPIPE;
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        switch (whence) {
        case SEEK_SET:
                /* the first record */
@@ -537,7 +556,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        default:
                ret = -EINVAL;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
        return ret;
 }
 
@@ -551,14 +570,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &log_wait, wait);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        if (user->seq < log_next_seq) {
                /* return error when data has vanished underneath us */
                if (user->seq < log_first_seq)
                        ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
                ret = POLLIN|POLLRDNORM;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        return ret;
 }
@@ -582,10 +601,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 
        mutex_init(&user->lock);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        user->idx = log_first_idx;
        user->seq = log_first_seq;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        file->private_data = user;
        return 0;
@@ -785,44 +804,64 @@ static bool printk_time;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static size_t print_time(u64 ts, char *buf)
+{
+       unsigned long rem_nsec;
+
+       if (!printk_time)
+               return 0;
+
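+       /* Length-only pass: "[12345.678901] " is 15 characters */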
+       if (!buf)
+               return 15;
+
+       rem_nsec = do_div(ts, 1000000000);
+       return sprintf(buf, "[%5lu.%06lu] ",
+                      (unsigned long)ts, rem_nsec / 1000);
+}
+
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
        size_t len = 0;
+       unsigned int prefix = (msg->facility << 3) | msg->level;
 
        if (syslog) {
                if (buf) {
-                       len += sprintf(buf, "<%u>", msg->level);
+                       len += sprintf(buf, "<%u>", prefix);
                } else {
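+                       /* "<%u>" needs '<', '>' and 1-4 digits for the prefix value */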
                        len += 3;
-                       if (msg->level > 9)
-                               len++;
-                       if (msg->level > 99)
+                       if (prefix > 999)
+                               len += 3;
+                       else if (prefix > 99)
+                               len += 2;
+                       else if (prefix > 9)
                                len++;
                }
        }
 
-       if (printk_time) {
-               if (buf) {
-                       unsigned long long ts = msg->ts_nsec;
-                       unsigned long rem_nsec = do_div(ts, 1000000000);
-
-                       len += sprintf(buf + len, "[%5lu.%06lu] ",
-                                        (unsigned long) ts, rem_nsec / 1000);
-               } else {
-                       len += 15;
-               }
-       }
-
+       len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
        return len;
 }
 
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size)
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size)
 {
        const char *text = log_text(msg);
        size_t text_size = msg->text_len;
+       bool prefix = true;
+       bool newline = true;
        size_t len = 0;
 
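+       /*
+        * Decide whether this record needs a prefix and a trailing newline:
+        * fragments which continue a previous LOG_CONT record get neither,
+        * unless the record flags explicitly ask for them.
+        */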
+       if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
+               prefix = false;
+
+       if (msg->flags & LOG_CONT) {
+               if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
+                       prefix = false;
+
+               if (!(msg->flags & LOG_NEWLINE))
+                       newline = false;
+       }
+
        do {
                const char *next = memchr(text, '\n', text_size);
                size_t text_len;
@@ -840,16 +879,22 @@ static size_t msg_print_text(const struct log *msg, bool syslog,
                            text_len + 1 >= size - len)
                                break;
 
-                       len += print_prefix(msg, syslog, buf + len);
+                       if (prefix)
+                               len += print_prefix(msg, syslog, buf + len);
                        memcpy(buf + len, text, text_len);
                        len += text_len;
-                       buf[len++] = '\n';
+                       if (next || newline)
+                               buf[len++] = '\n';
                } else {
                        /* SYSLOG_ACTION_* buffer size only calculation */
-                       len += print_prefix(msg, syslog, NULL);
-                       len += text_len + 1;
+                       if (prefix)
+                               len += print_prefix(msg, syslog, NULL);
+                       len += text_len;
+                       if (next || newline)
+                               len++;
                }
 
+               prefix = true;
                text = next;
        } while (text);
 
@@ -860,26 +905,60 @@ static int syslog_print(char __user *buf, int size)
 {
        char *text;
        struct log *msg;
-       int len;
+       int len = 0;
 
        text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
        if (!text)
                return -ENOMEM;
 
-       raw_spin_lock_irq(&logbuf_lock);
-       if (syslog_seq < log_first_seq) {
-               /* messages are gone, move to first one */
-               syslog_seq = log_first_seq;
-               syslog_idx = log_first_idx;
-       }
-       msg = log_from_idx(syslog_idx);
-       len = msg_print_text(msg, true, text, LOG_LINE_MAX);
-       syslog_idx = log_next(syslog_idx);
-       syslog_seq++;
-       raw_spin_unlock_irq(&logbuf_lock);
+       while (size > 0) {
+               size_t n;
+               size_t skip;
 
-       if (len > 0 && copy_to_user(buf, text, len))
-               len = -EFAULT;
+               raw_spin_lock_irq(&logbuf_lock);
+               if (syslog_seq < log_first_seq) {
+                       /* messages are gone, move to first one */
+                       syslog_seq = log_first_seq;
+                       syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
+               }
+               if (syslog_seq == log_next_seq) {
+                       raw_spin_unlock_irq(&logbuf_lock);
+                       break;
+               }
+
+               skip = syslog_partial;
+               msg = log_from_idx(syslog_idx);
+               n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
+               if (n - syslog_partial <= size) {
+                       /* message fits into buffer, move forward */
+                       syslog_idx = log_next(syslog_idx);
+                       syslog_seq++;
+                       syslog_prev = msg->flags;
+                       n -= syslog_partial;
+                       syslog_partial = 0;
+               } else if (!len) {
+                       /* partial read(), remember position */
+                       n = size;
+                       syslog_partial += n;
+               } else
+                       n = 0;
+               raw_spin_unlock_irq(&logbuf_lock);
+
+               if (!n)
+                       break;
+
+               if (copy_to_user(buf, text + skip, n)) {
+                       if (!len)
+                               len = -EFAULT;
+                       break;
+               }
+
+               len += n;
+               size -= n;
+               buf += n;
+       }
 
        kfree(text);
        return len;
@@ -899,6 +978,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                u64 next_seq;
                u64 seq;
                u32 idx;
+               enum log_flags prev;
 
                if (clear_seq < log_first_seq) {
                        /* messages are gone, move to first available one */
@@ -909,41 +989,47 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                /*
                 * Find first record that fits, including all following records,
                 * into the user-provided buffer for this dump.
-               */
+                */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len += msg_print_text(msg, true, NULL, 0);
+                       len += msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
+
+               /* move first record forward until length fits into the buffer */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (len > size && seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len -= msg_print_text(msg, true, NULL, 0);
+                       len -= msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
 
-               /* last message in this dump */
+               /* last message fitting into this dump */
                next_seq = log_next_seq;
 
                len = 0;
+               prev = 0;
                while (len >= 0 && seq < next_seq) {
                        struct log *msg = log_from_idx(idx);
                        int textlen;
 
-                       textlen = msg_print_text(msg, true, text, LOG_LINE_MAX);
+                       textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
                        if (textlen < 0) {
                                len = textlen;
                                break;
                        }
                        idx = log_next(idx);
                        seq++;
+                       prev = msg->flags;
 
                        raw_spin_unlock_irq(&logbuf_lock);
                        if (copy_to_user(buf + len, text, textlen))
@@ -956,6 +1042,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                                /* messages are gone, move to next one */
                                seq = log_first_seq;
                                idx = log_first_idx;
+                               prev = 0;
                        }
                }
        }
@@ -1027,6 +1114,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
        /* Clear ring buffer */
        case SYSLOG_ACTION_CLEAR:
                syslog_print_all(NULL, 0, true);
+               break;
        /* Disable logging to console */
        case SYSLOG_ACTION_CONSOLE_OFF:
                if (saved_console_loglevel == -1)
@@ -1059,6 +1147,8 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                        /* messages are gone, move to first one */
                        syslog_seq = log_first_seq;
                        syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
                }
                if (from_file) {
                        /*
@@ -1068,19 +1158,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                         */
                        error = log_next_idx - syslog_idx;
                } else {
-                       u64 seq;
-                       u32 idx;
+                       u64 seq = syslog_seq;
+                       u32 idx = syslog_idx;
+                       enum log_flags prev = syslog_prev;
 
                        error = 0;
-                       seq = syslog_seq;
-                       idx = syslog_idx;
                        while (seq < log_next_seq) {
                                struct log *msg = log_from_idx(idx);
 
-                               error += msg_print_text(msg, true, NULL, 0);
+                               error += msg_print_text(msg, prev, true, NULL, 0);
                                idx = log_next(idx);
                                seq++;
+                               prev = msg->flags;
                        }
+                       error -= syslog_partial;
                }
                raw_spin_unlock_irq(&logbuf_lock);
                break;
@@ -1101,21 +1192,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
        return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
 }
 
-#ifdef CONFIG_KGDB_KDB
-/* kdb dmesg command needs access to the syslog buffer.  do_syslog()
- * uses locks so it cannot be used during debugging.  Just tell kdb
- * where the start and end of the physical and logical logs are.  This
- * is equivalent to do_syslog(3).
- */
-void kdb_syslog_data(char *syslog_data[4])
-{
-       syslog_data[0] = log_buf;
-       syslog_data[1] = log_buf + log_buf_len;
-       syslog_data[2] = log_buf + log_first_idx;
-       syslog_data[3] = log_buf + log_next_idx;
-}
-#endif /* CONFIG_KGDB_KDB */
-
 static bool __read_mostly ignore_loglevel;
 
 static int __init ignore_loglevel_setup(char *str)
@@ -1259,22 +1335,98 @@ static inline void printk_delay(void)
        }
 }
 
+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete, or a race forces it. The line fragments,
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+       char buf[LOG_LINE_MAX];
+       size_t len;                     /* length == 0 means unused buffer */
+       size_t cons;                    /* bytes written to console */
+       struct task_struct *owner;      /* task of first print */
+       u64 ts_nsec;                    /* time of first print */
+       u8 level;                       /* log level of first message */
+       u8 facility;                    /* log facility of first message */
+       bool flushed:1;                 /* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+       if (cont.flushed)
+               return;
+       if (cont.len == 0)
+               return;
+
+       log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+                 NULL, 0, cont.buf, cont.len);
+
+       cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
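+       /*
+        * A buffer that was already committed to the record log cannot take
+        * more fragments; the caller has to store this one on its own.
+        */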
+       if (cont.len && cont.flushed)
+               return false;
+
+       if (cont.len + len > sizeof(cont.buf)) {
+               cont_flush();
+               return false;
+       }
+
+       if (!cont.len) {
+               cont.facility = facility;
+               cont.level = level;
+               cont.owner = current;
+               cont.ts_nsec = local_clock();
+               cont.cons = 0;
+               cont.flushed = false;
+       }
+
+       memcpy(cont.buf + cont.len, text, len);
+       cont.len += len;
+       return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+       size_t textlen = 0;
+       size_t len;
+
+       if (cont.cons == 0) {
+               textlen += print_time(cont.ts_nsec, text);
+               size -= textlen;
+       }
+
+       len = cont.len - cont.cons;
+       if (len > 0) {
+               if (len+1 > size)
+                       len = size-1;
+               memcpy(text + textlen, cont.buf + cont.cons, len);
+               textlen += len;
+               cont.cons = cont.len;
+       }
+
+       if (cont.flushed) {
+               text[textlen++] = '\n';
+               /* got everything, release buffer */
+               cont.len = 0;
+       }
+       return textlen;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
                            const char *dict, size_t dictlen,
                            const char *fmt, va_list args)
 {
        static int recursion_bug;
-       static char cont_buf[LOG_LINE_MAX];
-       static size_t cont_len;
-       static int cont_level;
-       static struct task_struct *cont_task;
        static char textbuf[LOG_LINE_MAX];
        char *text = textbuf;
        size_t text_len;
+       enum log_flags lflags = 0;
        unsigned long flags;
        int this_cpu;
-       bool newline = false;
-       bool prefix = false;
        int printed_len = 0;
 
        boot_delay_msec();
@@ -1313,7 +1465,8 @@ asmlinkage int vprintk_emit(int facility, int level,
                recursion_bug = 0;
                printed_len += strlen(recursion_msg);
                /* emit KERN_CRIT message */
-               log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+               log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+                         NULL, 0, recursion_msg, printed_len);
        }
 
        /*
@@ -1325,7 +1478,7 @@ asmlinkage int vprintk_emit(int facility, int level,
        /* mark and strip a trailing newline */
        if (text_len && text[text_len-1] == '\n') {
                text_len--;
-               newline = true;
+               lflags |= LOG_NEWLINE;
        }
 
        /* strip syslog prefix and extract log level or control flags */
@@ -1335,7 +1488,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                        if (level == -1)
                                level = text[1] - '0';
                case 'd':       /* KERN_DEFAULT */
-                       prefix = true;
+                       lflags |= LOG_PREFIX;
                case 'c':       /* KERN_CONT */
                        text += 3;
                        text_len -= 3;
@@ -1345,61 +1498,41 @@ asmlinkage int vprintk_emit(int facility, int level,
        if (level == -1)
                level = default_message_loglevel;
 
-       if (dict) {
-               prefix = true;
-               newline = true;
-       }
-
-       if (!newline) {
-               if (cont_len && (prefix || cont_task != current)) {
-                       /*
-                        * Flush earlier buffer, which is either from a
-                        * different thread, or when we got a new prefix.
-                        */
-                       log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-               }
+       if (dict)
+               lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-               if (!cont_len) {
-                       cont_level = level;
-                       cont_task = current;
-               }
+       if (!(lflags & LOG_NEWLINE)) {
+               /*
+                * Flush the conflicting buffer. An earlier newline was missing,
+                * or another task also prints continuation lines.
+                */
+               if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
+                       cont_flush();
 
-               /* buffer or append to earlier buffer from the same thread */
-               if (cont_len + text_len > sizeof(cont_buf))
-                       text_len = sizeof(cont_buf) - cont_len;
-               memcpy(cont_buf + cont_len, text, text_len);
-               cont_len += text_len;
+               /* buffer line if possible, otherwise store it right away */
+               if (!cont_add(facility, level, text, text_len))
+                       log_store(facility, level, lflags | LOG_CONT, 0,
+                                 dict, dictlen, text, text_len);
        } else {
-               if (cont_len && cont_task == current) {
-                       if (prefix) {
-                               /*
-                                * New prefix from the same thread; flush. We
-                                * either got no earlier newline, or we race
-                                * with an interrupt.
-                                */
-                               log_store(facility, cont_level,
-                                         NULL, 0, cont_buf, cont_len);
-                               cont_len = 0;
-                       }
+               bool stored = false;
 
-                       /* append to the earlier buffer and flush */
-                       if (cont_len + text_len > sizeof(cont_buf))
-                               text_len = sizeof(cont_buf) - cont_len;
-                       memcpy(cont_buf + cont_len, text, text_len);
-                       cont_len += text_len;
-                       log_store(facility, cont_level,
-                                 NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-                       cont_task = NULL;
-                       printed_len = cont_len;
-               } else {
-                       /* ordinary single and terminated line */
-                       log_store(facility, level,
-                                 dict, dictlen, text, text_len);
-                       printed_len = text_len;
+               /*
+                * If an earlier newline was missing and it was the same task,
+                * either merge it with the current buffer and flush, or if
+                * there was a race with interrupts (prefix == true), then just
+                * flush it out and store this line separately.
+                */
+               if (cont.len && cont.owner == current) {
+                       if (!(lflags & LOG_PREFIX))
+                               stored = cont_add(facility, level, text, text_len);
+                       cont_flush();
                }
+
+               if (!stored)
+                       log_store(facility, level, lflags, 0,
+                                 dict, dictlen, text, text_len);
        }
+       printed_len += text_len;
 
        /*
         * Try to acquire and then immediately release the console semaphore.
@@ -1486,11 +1619,18 @@ EXPORT_SYMBOL(printk);
 #else
 
 #define LOG_LINE_MAX 0
+static struct cont {
+       size_t len;
+       size_t cons;
+       u8 level;
+       bool flushed:1;
+} cont;
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -1765,6 +1905,7 @@ void wake_up_klogd(void)
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u32 console_idx;
+static enum log_flags console_prev;
 
 /**
  * console_unlock - unlock the console system
@@ -1782,6 +1923,7 @@ static u32 console_idx;
  */
 void console_unlock(void)
 {
+       static char text[LOG_LINE_MAX];
        static u64 seen_seq;
        unsigned long flags;
        bool wake_klogd = false;
@@ -1794,10 +1936,23 @@ void console_unlock(void)
 
        console_may_schedule = 0;
 
+       /* flush buffered message fragment immediately to console */
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+               size_t len;
+
+               len = cont_print_text(text, sizeof(text));
+               raw_spin_unlock(&logbuf_lock);
+               stop_critical_timings();
+               call_console_drivers(cont.level, text, len);
+               start_critical_timings();
+               local_irq_restore(flags);
+       } else
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 again:
        for (;;) {
                struct log *msg;
-               static char text[LOG_LINE_MAX];
                size_t len;
                int level;
 
@@ -1811,18 +1966,35 @@ again:
                        /* messages are gone, move to first one */
                        console_seq = log_first_seq;
                        console_idx = log_first_idx;
+                       console_prev = 0;
                }
-
+skip:
                if (console_seq == log_next_seq)
                        break;
 
                msg = log_from_idx(console_idx);
-               level = msg->level & 7;
-
-               len = msg_print_text(msg, false, text, sizeof(text));
+               if (msg->flags & LOG_NOCONS) {
+                       /*
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it.
+                        */
+                       console_idx = log_next(console_idx);
+                       console_seq++;
+                       /*
+                        * We will get here again when we register a new
+                        * CON_PRINTBUFFER console. Clear the flag so we
+                        * will properly dump everything later.
+                        */
+                       msg->flags &= ~LOG_NOCONS;
+                       goto skip;
+               }
 
+               level = msg->level;
+               len = msg_print_text(msg, console_prev, false,
+                                    text, sizeof(text));
                console_idx = log_next(console_idx);
                console_seq++;
+               console_prev = msg->flags;
                raw_spin_unlock(&logbuf_lock);
 
                stop_critical_timings();        /* don't trace print latency */
@@ -2085,6 +2257,7 @@ void register_console(struct console *newcon)
                raw_spin_lock_irqsave(&logbuf_lock, flags);
                console_seq = syslog_seq;
                console_idx = syslog_idx;
+               console_prev = syslog_prev;
                raw_spin_unlock_irqrestore(&logbuf_lock, flags);
                /*
                 * We're about to replay the log buffer.  Only do this to the
@@ -2300,48 +2473,256 @@ module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
  *
- * Iterate through each of the dump devices and call the oops/panic
- * callbacks with the log buffer.
+ * Call each of the registered dumper's dump() callback, which can
+ * retrieve the kmsg records with kmsg_dump_get_line() or
+ * kmsg_dump_get_buffer().
  */
 void kmsg_dump(enum kmsg_dump_reason reason)
 {
-       u64 idx;
        struct kmsg_dumper *dumper;
-       const char *s1, *s2;
-       unsigned long l1, l2;
        unsigned long flags;
 
        if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
                return;
 
-       /* Theoretically, the log could move on after we do this, but
-          there's not a lot we can do about that. The new messages
-          will overwrite the start of what we dump. */
+       rcu_read_lock();
+       list_for_each_entry_rcu(dumper, &dump_list, list) {
+               if (dumper->max_reason && reason > dumper->max_reason)
+                       continue;
+
+               /* initialize iterator with data about the stored records */
+               dumper->active = true;
+
+               raw_spin_lock_irqsave(&logbuf_lock, flags);
+               dumper->cur_seq = clear_seq;
+               dumper->cur_idx = clear_idx;
+               dumper->next_seq = log_next_seq;
+               dumper->next_idx = log_next_idx;
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+               /* invoke dumper which will iterate over records */
+               dumper->dump(dumper, reason);
+
+               /* reset iterator */
+               dumper->active = false;
+       }
+       rcu_read_unlock();
+}
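/*
 * Illustrative sketch, not part of this commit: a minimal dumper as it
 * would be registered against the interface above.  The names
 * "example_dumper"/"example_dump" and the init wiring are assumptions;
 * only kmsg_dump_register(), the dump() signature and the max_reason
 * check are taken from the surrounding code.
 */
#include <linux/kmsg_dump.h>

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason);

static struct kmsg_dumper example_dumper = {
	.dump       = example_dump,
	.max_reason = KMSG_DUMP_OOPS,	/* only panic/oops, see the check above */
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}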
+
+/**
+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ *
+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
+ */
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len)
+{
+       struct log *msg;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
+
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= log_next_seq)
+               goto out;
+
+       msg = log_from_idx(dumper->cur_idx);
+       l = msg_print_text(msg, 0, syslog, line, size);
+
+       dumper->cur_idx = log_next(dumper->cur_idx);
+       dumper->cur_seq++;
+       ret = true;
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+
+/**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len)
+{
+       unsigned long flags;
+       bool ret;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
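/*
 * Illustrative sketch, not part of this commit: a dump() callback walking
 * the records one line at a time with the helper above.  What is done with
 * each line is left as a placeholder.
 */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len;

	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		/* consume line[0..len) here, e.g. push it to a device */
	}
}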
+
+/**
+ * kmsg_dump_get_buffer - copy kmsg log lines
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the lines to
+ * @size: maximum size of the buffer
+ * @len: length of data placed into buffer
+ *
+ * Start at the end of the kmsg buffer and fill the provided buffer
+ * with as many of the *youngest* kmsg records as fit into it.
+ * If the buffer is large enough, all available kmsg records will be
+ * copied with a single call.
+ *
+ * Consecutive calls will fill the buffer with the next block of
+ * available older records, not including the earlier retrieved ones.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len)
+{
+       unsigned long flags;
+       u64 seq;
+       u32 idx;
+       u64 next_seq;
+       u32 next_idx;
+       enum log_flags prev;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
 
        raw_spin_lock_irqsave(&logbuf_lock, flags);
-       if (syslog_seq < log_first_seq)
-               idx = syslog_idx;
-       else
-               idx = log_first_idx;
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= dumper->next_seq) {
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+               goto out;
+       }
 
-       if (idx > log_next_idx) {
-               s1 = log_buf;
-               l1 = log_next_idx;
+       /* calculate length of entire buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l += msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
 
-               s2 = log_buf + idx;
-               l2 = log_buf_len - idx;
-       } else {
-               s1 = "";
-               l1 = 0;
+       /* move first record forward until length fits into the buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (l > size && seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l -= msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
+
+       /* last message in next iteration */
+       next_seq = seq;
+       next_idx = idx;
+
+       l = 0;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
 
-               s2 = log_buf + idx;
-               l2 = log_next_idx - idx;
+               l += msg_print_text(msg, prev, syslog, buf + l, size - l);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
        }
+
+       dumper->next_seq = next_seq;
+       dumper->next_idx = next_idx;
+       ret = true;
        raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
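/*
 * Illustrative sketch, not part of this commit: grabbing the youngest
 * records in one go, as a crash dumper writing into a fixed-size
 * persistent buffer might.  The buffer size and what happens to the data
 * afterwards are assumptions.
 */
static char example_crash_buf[4096];

static void example_buffer_dump(struct kmsg_dumper *dumper,
				enum kmsg_dump_reason reason)
{
	size_t len;

	if (kmsg_dump_get_buffer(dumper, false, example_crash_buf,
				 sizeof(example_crash_buf), &len)) {
		/* example_crash_buf[0..len) now holds the newest records */
	}
}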
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(dumper, &dump_list, list)
-               dumper->dump(dumper, reason, s1, l1, s2, l2);
-       rcu_read_unlock();
+/**
+ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ *
+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+       dumper->cur_seq = clear_seq;
+       dumper->cur_idx = clear_idx;
+       dumper->next_seq = log_next_seq;
+       dumper->next_idx = log_next_idx;
+}
+
+/**
+ * kmsg_dump_rewind - reset the iterator
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ */
+void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       kmsg_dump_rewind_nolock(dumper);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 }
+EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
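/*
 * Illustrative sketch, not part of this commit: a dumper that needs two
 * passes over the same records inside a single dump() callback rewinds
 * the iterator in between.
 */
static void example_two_pass_dump(struct kmsg_dumper *dumper,
				  enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len, total = 0;

	while (kmsg_dump_get_line(dumper, false, line, sizeof(line), &len))
		total += len;		/* first pass: size the output */

	kmsg_dump_rewind(dumper);	/* reset cur_seq/cur_idx, see above */

	while (kmsg_dump_get_line(dumper, false, line, sizeof(line), &len)) {
		/* second pass: actually emit the line */
	}
}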
 #endif
index 0da7b88d92d0a599242e8dcf70ab566d3e23b687..4b97bba7396e6194db4bf5b644ef8b5a1997c974 100644 (file)
@@ -201,6 +201,7 @@ void rcu_note_context_switch(int cpu)
 {
        trace_rcu_utilization("Start context switch");
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch(cpu);
        trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -1397,6 +1398,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
        rdp->qlen_lazy += rsp->qlen_lazy;
        rdp->qlen += rsp->qlen;
        rdp->n_cbs_adopted += rsp->qlen;
+       if (rsp->qlen_lazy != rsp->qlen)
+               rcu_idle_count_callbacks_posted();
        rsp->qlen_lazy = 0;
        rsp->qlen = 0;
 
@@ -1528,7 +1531,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
-       int bl, count, count_lazy;
+       int bl, count, count_lazy, i;
 
        /* If no callbacks are ready, just return.*/
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1551,9 +1554,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
        tail = rdp->nxttail[RCU_DONE_TAIL];
-       for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-               if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-                       rdp->nxttail[count] = &rdp->nxtlist;
+       for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+               if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+                       rdp->nxttail[i] = &rdp->nxtlist;
        local_irq_restore(flags);
 
        /* Invoke callbacks. */
@@ -1581,9 +1584,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
-               for (count = 0; count < RCU_NEXT_SIZE; count++)
-                       if (&rdp->nxtlist == rdp->nxttail[count])
-                               rdp->nxttail[count] = tail;
+               for (i = 0; i < RCU_NEXT_SIZE; i++)
+                       if (&rdp->nxtlist == rdp->nxttail[i])
+                               rdp->nxttail[i] = tail;
                        else
                                break;
        }
index 7f5d138dedf55c94c7f9a8f6ba9953b90f6b146b..19b61ac1079f825702a7f17ae7d8dec88756c141 100644 (file)
@@ -84,6 +84,20 @@ struct rcu_dynticks {
                                    /* Process level is worth LLONG_MAX/2. */
        int dynticks_nmi_nesting;   /* Track NMI nesting level. */
        atomic_t dynticks;          /* Even value for idle, else odd. */
+#ifdef CONFIG_RCU_FAST_NO_HZ
+       int dyntick_drain;          /* Prepare-for-idle state variable. */
+       unsigned long dyntick_holdoff;
+                                   /* No retries for the jiffy of failure. */
+       struct timer_list idle_gp_timer;
+                                   /* Wake up CPU sleeping with callbacks. */
+       unsigned long idle_gp_timer_expires;
+                                   /* When to wake up CPU (for repost). */
+       bool idle_first_pass;       /* First pass of attempt to go idle? */
+       unsigned long nonlazy_posted;
+                                   /* # times non-lazy CBs posted to CPU. */
+       unsigned long nonlazy_posted_snap;
+                                   /* idle-period nonlazy_posted snapshot. */
+#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
 /* RCU's kthread states for tracing. */
@@ -430,6 +444,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
index 2411000d98690aacd76d20acc039964402e83388..3e4899459f3d9d7177ee67d41b8ea1121ddb9540 100644 (file)
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +164,7 @@ void rcu_preempt_note_context_switch(void)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ void rcu_preempt_note_context_switch(void)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(smp_processor_id());
+       rcu_preempt_qs(cpu);
        local_irq_restore(flags);
 }
 
@@ -1001,6 +1001,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
@@ -1886,8 +1894,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
  * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+       *delta_jiffies = ULONG_MAX;
        return rcu_cpu_has_callbacks(cpu);
 }
 
@@ -1962,41 +1971,6 @@ static void rcu_idle_count_callbacks_posted(void)
 #define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-       /* Flag a new idle sojourn to the idle-entry state machine. */
-       per_cpu(rcu_idle_first_pass, cpu) = 1;
-       /* If no callbacks, RCU doesn't need the CPU. */
-       if (!rcu_cpu_has_callbacks(cpu))
-               return 0;
-       /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-       return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2039,6 +2013,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
+/*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       /* Flag a new idle sojourn to the idle-entry state machine. */
+       rdtp->idle_first_pass = 1;
+       /* If no callbacks, RCU doesn't need the CPU. */
+       if (!rcu_cpu_has_callbacks(cpu)) {
+               *delta_jiffies = ULONG_MAX;
+               return 0;
+       }
+       if (rdtp->dyntick_holdoff == jiffies) {
+               /* RCU recently tried and failed, so don't try again. */
+               *delta_jiffies = 1;
+               return 1;
+       }
+       /* Set up for the possibility that RCU will post a timer. */
+       if (rcu_cpu_has_nonlazy_callbacks(cpu))
+               *delta_jiffies = RCU_IDLE_GP_DELAY;
+       else
+               *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+       return 0;
+}
+
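/*
 * Illustrative sketch, not part of this commit: how a tick-stop path can
 * combine its own sleep length with the value returned through
 * delta_jiffies.  The function name and surrounding logic are assumptions;
 * only the rcu_needs_cpu() signature comes from the code above.
 */
static unsigned long example_sleep_length(int cpu, unsigned long delta_jiffies)
{
	unsigned long rcu_delta_jiffies;

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies))
		return 0;	/* RCU needs the tick right now, don't stop it */

	/* sleep, but wake up no later than RCU's posted timer */
	return min(delta_jiffies, rcu_delta_jiffies);
}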
 /*
  * Handler for smp_call_function_single().  The only point of this
  * handler is to wake the CPU up, so the handler does only tracing.
@@ -2075,21 +2090,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-       setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                   rcu_idle_gp_timer_func, cpu);
-       per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
-       per_cpu(rcu_idle_first_pass, cpu) = 1;
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       rdtp->dyntick_holdoff = jiffies - 1;
+       setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+       rdtp->idle_gp_timer_expires = jiffies - 1;
+       rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-       del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       del_timer(&rdtp->idle_gp_timer);
        trace_rcu_prep_idle("Cleanup after idle");
 }
 
@@ -2108,42 +2126,41 @@ static void rcu_cleanup_after_idle(int cpu)
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
        struct timer_list *tp;
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
        /*
         * If this is an idle re-entry, for example, due to use of
         * RCU_NONIDLE() or the new idle-loop tracing API within the idle
         * loop, then don't take any state-machine actions, unless the
         * momentary exit from idle queued additional non-lazy callbacks.
-        * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+        * Instead, repost the ->idle_gp_timer if this CPU has callbacks
         * pending.
         */
-       if (!per_cpu(rcu_idle_first_pass, cpu) &&
-           (per_cpu(rcu_nonlazy_posted, cpu) ==
-            per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+       if (!rdtp->idle_first_pass &&
+           (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
                if (rcu_cpu_has_callbacks(cpu)) {
-                       tp = &per_cpu(rcu_idle_gp_timer, cpu);
-                       mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+                       tp = &rdtp->idle_gp_timer;
+                       mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
                }
                return;
        }
-       per_cpu(rcu_idle_first_pass, cpu) = 0;
-       per_cpu(rcu_nonlazy_posted_snap, cpu) =
-               per_cpu(rcu_nonlazy_posted, cpu) - 1;
+       rdtp->idle_first_pass = 0;
+       rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
 
        /*
         * If there are no callbacks on this CPU, enter dyntick-idle mode.
         * Also reset state to avoid prejudicing later attempts.
         */
        if (!rcu_cpu_has_callbacks(cpu)) {
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               rdtp->dyntick_holdoff = jiffies - 1;
+               rdtp->dyntick_drain = 0;
                trace_rcu_prep_idle("No callbacks");
                return;
        }
@@ -2152,36 +2169,37 @@ static void rcu_prepare_for_idle(int cpu)
         * If in holdoff mode, just return.  We will presumably have
         * refrained from disabling the scheduling-clock tick.
         */
-       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+       if (rdtp->dyntick_holdoff == jiffies) {
                trace_rcu_prep_idle("In holdoff");
                return;
        }
 
-       /* Check and update the rcu_dyntick_drain sequencing. */
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+       /* Check and update the ->dyntick_drain sequencing. */
+       if (rdtp->dyntick_drain <= 0) {
                /* First time through, initialize the counter. */
-               per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-       } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+               rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+       } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
                   !rcu_pending(cpu) &&
                   !local_softirq_pending()) {
                /* Can we go dyntick-idle despite still having callbacks? */
-               trace_rcu_prep_idle("Dyntick with callbacks");
-               per_cpu(rcu_dyntick_drain, cpu) = 0;
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-               if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+               rdtp->dyntick_drain = 0;
+               rdtp->dyntick_holdoff = jiffies;
+               if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+                       trace_rcu_prep_idle("Dyntick with callbacks");
+                       rdtp->idle_gp_timer_expires =
                                           jiffies + RCU_IDLE_GP_DELAY;
-               else
-                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+               } else {
+                       rdtp->idle_gp_timer_expires =
                                           jiffies + RCU_IDLE_LAZY_GP_DELAY;
-               tp = &per_cpu(rcu_idle_gp_timer, cpu);
-               mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
-               per_cpu(rcu_nonlazy_posted_snap, cpu) =
-                       per_cpu(rcu_nonlazy_posted, cpu);
+                       trace_rcu_prep_idle("Dyntick with lazy callbacks");
+               }
+               tp = &rdtp->idle_gp_timer;
+               mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+               rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
                return; /* Nothing more to do immediately. */
-       } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+       } else if (--(rdtp->dyntick_drain) <= 0) {
                /* We have hit the limit, so time to give up. */
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+               rdtp->dyntick_holdoff = jiffies;
                trace_rcu_prep_idle("Begin holdoff");
                invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
                return;
@@ -2227,7 +2245,7 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_idle_count_callbacks_posted(void)
 {
-       __this_cpu_add(rcu_nonlazy_posted, 1);
+       __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2256,12 @@ static void rcu_idle_count_callbacks_posted(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+       struct timer_list *tltp = &rdtp->idle_gp_timer;
 
        sprintf(cp, "drain=%d %c timer=%lu",
-               per_cpu(rcu_dyntick_drain, cpu),
-               per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+               rdtp->dyntick_drain,
+               rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
                timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
index ab56a1764d4db8c6a5136cd3b636dc330648783b..e8cd2027abbd1e2e0ed6d432974816cef2845a42 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index 7e8ea66a8c016ffc934997256835f3fdc735f2f8..e1d2b8ee76d5bcd22d552c21cdf2102eaf6c7627 100644 (file)
@@ -515,8 +515,8 @@ out:
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  * @size: requested resource region size
- * @min: minimum size to allocate
- * @max: maximum size to allocate
+ * @min: minimum boundary to allocate
+ * @max: maximum boundary to allocate
  * @align: alignment requested, in bytes
  * @alignf: alignment function, optional, called if not NULL
  * @alignf_data: arbitrary data to pass to the @alignf function
index 39eb6011bc38e3f20188c942cb31a173fce74093..468bdd44c1baeb914cfc93037b86691b839ccc46 100644 (file)
@@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)      \
        #name ,
 
-static __read_mostly char *sched_feat_names[] = {
+static const char * const sched_feat_names[] = {
 #include "features.h"
-       NULL
 };
 
 #undef SCHED_FEAT
@@ -2082,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
-       rcu_switch_from(prev);
        switch_to(prev, next, prev);
 
        barrier();
@@ -2162,11 +2160,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * For a number of reasons the above turns into the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    a serious number of cpus; therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true by definition -- we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
+ */
+
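/*
 * Worked example, not part of this commit, of the decaying average above
 * using the kernel's constants FSHIFT = 11, FIXED_1 = 1 << 11 = 2048 and
 * EXP_1 = 1884 (~0.92 * FIXED_1, the 1-minute factor for LOAD_FREQ = 5s):
 *
 *   old avenrun[0] = 2048                     (a 1-minute load of 1.00)
 *   nr_active      = 3, active = 3 * FIXED_1 = 6144
 *
 *   new avenrun[0] = (2048 * 1884 + 6144 * (2048 - 1884)) >> 11
 *                  = 2376                     (~1.16)
 *
 * i.e. one LOAD_FREQ interval moves the 1-minute average about 8% of the
 * way from 1.00 towards the instantaneous value of 3.00.
 */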
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
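/*
 * Illustrative sketch, not part of this commit: turning the fixed-point
 * values into the familiar "1.16" style output, the way a
 * /proc/loadavg-like reader would.  The helper macros and the seq_file
 * plumbing are local to this sketch.
 */
#include <linux/seq_file.h>

#define EX_LOAD_INT(x)  ((x) >> FSHIFT)
#define EX_LOAD_FRAC(x) EX_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static int example_loadavg_show(struct seq_file *m, void *v)
{
	unsigned long loads[3];

	get_avenrun(loads, FIXED_1 / 200, 0);	/* +0.005 so truncation rounds */
	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu\n",
		   EX_LOAD_INT(loads[0]), EX_LOAD_FRAC(loads[0]),
		   EX_LOAD_INT(loads[1]), EX_LOAD_FRAC(loads[1]),
		   EX_LOAD_INT(loads[2]), EX_LOAD_FRAC(loads[2]));
	return 0;
}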
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2183,6 +2243,9 @@ static long calc_load_fold_active(struct rq *this_rq)
        return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
@@ -2194,30 +2257,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
+
+static inline int calc_load_write_idx(void)
+{
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * need to observe the new update time.
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
 
-void calc_load_account_idle(struct rq *this_rq)
+       return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
 {
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
        long delta;
 
+       /*
+        * We're going into NOHZ mode; if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
        delta = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks_idle);
+       if (delta) {
+               int idx = calc_load_write_idx();
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-       long delta = 0;
+       struct rq *this_rq = this_rq();
 
        /*
-        * Its got a race, we don't care...
+        * If we're still before the sample window, we're done.
         */
-       if (atomic_long_read(&calc_load_tasks_idle))
-               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
+
+       /*
+        * We woke inside or after the sample window, this means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
+        */
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
        return delta;
 }
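/*
 * Illustrative sketch, not part of this commit: the pairing the NO_HZ idle
 * path is expected to provide around a tick stop/restart.  The function
 * names here are assumptions; only calc_load_enter_idle() and
 * calc_load_exit_idle() come from the code above.
 */
static void example_stop_sched_tick(void)
{
	calc_load_enter_idle();	/* fold the pending delta into the idle-delta */
	/* ... actually stop the scheduling-clock tick ... */
}

static void example_restart_sched_tick(void)
{
	/* ... restart the scheduling-clock tick ... */
	calc_load_exit_idle();	/* resync calc_load_update, skip this window */
}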
@@ -2303,66 +2454,39 @@ static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       /*
-        * If we crossed a calc_load_update boundary, make sure to fold
-        * any pending idle changes, the respective CPUs might have
-        * missed the tick driven calc_load_account_active() update
-        * due to NO_HZ.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       /*
-        * It could be the one fold was all it took, we done!
-        */
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Catch-up, fold however many we are behind still
-        */
-       delta = jiffies - calc_load_update - 10;
-       n = 1 + (delta / LOAD_FREQ);
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch-up, fold however many we are behind still
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
 
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
 
-       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+               calc_load_update += n * LOAD_FREQ;
+       }
 
-static inline long calc_load_fold_idle(void)
-{
-       return 0;
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time then flip the index, so that
+        * calc_load_write_idx() will see the new time when it reads the new
+        * index; this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2370,11 +2494,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-       long active;
+       long active, delta;
 
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2385,12 +2516,7 @@ void calc_global_load(unsigned long ticks)
        calc_load_update += LOAD_FREQ;
 
        /*
-        * Account one period with whatever state we found before
-        * folding in the nohz state and ageing the entire idle period.
-        *
-        * This avoids loosing a sample when we go idle between 
-        * calc_load_account_active() (10 ticks ago) and now and thus
-        * under-accounting.
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
 }
@@ -2407,13 +2533,16 @@ static void calc_load_account_active(struct rq *this_rq)
                return;
 
        delta  = calc_load_fold_active(this_rq);
-       delta += calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
        this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * End of global load-average stuff
+ */
+
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
@@ -2517,25 +2646,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
        sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_NO_HZ
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-       unsigned long curr_jiffies = jiffies;
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
        unsigned long load = this_rq->load.weight;
        unsigned long pending_updates;
 
        /*
-        * Bloody broken means of dealing with nohz, but better than nothing..
-        * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
-        * update and see 0 difference the one time and 2 the next, even though
-        * we ticked at roughtly the same rate.
-        *
-        * Hence we only use this from nohz_idle_balance() and skip this
-        * nonsense when called from the scheduler_tick() since that's
-        * guaranteed a stable rate.
+        * bail if there's load or we're actually up-to-date.
         */
        if (load || curr_jiffies == this_rq->last_load_update_tick)
                return;
@@ -2546,13 +2682,39 @@ void update_idle_cpu_load(struct rq *this_rq)
        __update_cpu_load(this_rq, load, pending_updates);
 }
 
+/*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+       struct rq *this_rq = this_rq();
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long pending_updates;
+
+       if (curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       raw_spin_lock(&this_rq->lock);
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       if (pending_updates) {
+               this_rq->last_load_update_tick = curr_jiffies;
+               /*
+                * We were idle, which means a load of 0; the current load might be
+                * !0 due to remote wakeups and the like.
+                */
+               __update_cpu_load(this_rq, 0, pending_updates);
+       }
+       raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ */
+
 /*
  * Called from scheduler_tick()
  */
 static void update_cpu_load_active(struct rq *this_rq)
 {
        /*
-        * See the mess in update_idle_cpu_load().
+        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
         */
        this_rq->last_load_update_tick = jiffies;
        __update_cpu_load(this_rq, this_rq->load.weight, 1);
@@ -4982,7 +5144,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                p->sched_class->set_cpus_allowed(p, new_mask);
 
        cpumask_copy(&p->cpus_allowed, new_mask);
-       p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+       p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5524,15 +5686,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_domain_debug_enabled;
+static __read_mostly int sched_debug_enabled;
 
-static int __init sched_domain_debug_setup(char *str)
+static int __init sched_debug_setup(char *str)
 {
-       sched_domain_debug_enabled = 1;
+       sched_debug_enabled = 1;
 
        return 0;
 }
-early_param("sched_debug", sched_domain_debug_setup);
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+       return sched_debug_enabled;
+}
 
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
@@ -5572,7 +5739,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->sgp->power) {
+               /*
+                * Even though we initialize ->power to something semi-sane,
+                * we leave power_orig unset. This allows us to detect if
+                * domain iteration is still funny without causing /0 traps.
+                */
+               if (!group->sgp->power_orig) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -5620,7 +5792,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
        int level = 0;
 
-       if (!sched_domain_debug_enabled)
+       if (!sched_debug_enabled)
                return;
 
        if (!sd) {
@@ -5641,6 +5813,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 }
 #else /* !CONFIG_SCHED_DEBUG */
 # define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+       return false;
+}
 #endif /* CONFIG_SCHED_DEBUG */
 
 static int sd_degenerate(struct sched_domain *sd)
@@ -5962,6 +6138,44 @@ struct sched_domain_topology_level {
        struct sd_data      data;
 };
 
+/*
+ * Build an iteration mask that can exclude certain CPUs from the upwards
+ * domain traversal.
+ *
+ * Asymmetric node setups can result in situations where the domain tree is of
+ * unequal depth, make sure to skip domains that already cover the entire
+ * range.
+ *
+ * In that case build_sched_domains() will have terminated the iteration early
+ * and our sibling sd spans will be empty. Domains should always include the
+ * cpu they're built on, so check that.
+ *
+ */
+static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+{
+       const struct cpumask *span = sched_domain_span(sd);
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *sibling;
+       int i;
+
+       for_each_cpu(i, span) {
+               sibling = *per_cpu_ptr(sdd->sd, i);
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+                       continue;
+
+               cpumask_set_cpu(i, sched_group_mask(sg));
+       }
+}
+
+/*
+ * Return the canonical balance cpu for this group, this is the first cpu
+ * of this group that's also in the iteration mask.
+ */
+int group_balance_cpu(struct sched_group *sg)
+{
+       return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+}
+
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
@@ -5980,6 +6194,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
+               child = *per_cpu_ptr(sdd->sd, i);
+
+               /* See the comment near build_group_mask(). */
+               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+                       continue;
+
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                GFP_KERNEL, cpu_to_node(cpu));
 
@@ -5987,8 +6207,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-
-               child = *per_cpu_ptr(sdd->sd, i);
                if (child->child) {
                        child = child->child;
                        cpumask_copy(sg_span, sched_domain_span(child));
@@ -5997,10 +6215,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_or(covered, covered, sg_span);
 
-               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
-               atomic_inc(&sg->sgp->ref);
+               sg->sgp = *per_cpu_ptr(sdd->sgp, i);
+               if (atomic_inc_return(&sg->sgp->ref) == 1)
+                       build_group_mask(sd, sg);
 
-               if (cpumask_test_cpu(cpu, sg_span))
+               /*
+                * Initialize sgp->power such that even if we mess up the
+                * domains and no possible iteration will get us here, we won't
+                * die on a /0 trap.
+                */
+               sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+
+               /*
+                * Make sure the first group of this domain contains the
+                * canonical balance cpu. Otherwise the sched_domain iteration
+                * breaks. See update_sg_lb_stats().
+                */
+               if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
+                   group_balance_cpu(sg) == cpu)
                        groups = sg;
 
                if (!first)
@@ -6074,6 +6306,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_clear(sched_group_cpus(sg));
                sg->sgp->power = 0;
+               cpumask_setall(sched_group_mask(sg));
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
@@ -6115,7 +6348,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
                sg = sg->next;
        } while (sg != sd->groups);
 
-       if (cpu != group_first_cpu(sg))
+       if (cpu != group_balance_cpu(sg))
                return;
 
        update_group_power(sd, cpu);
@@ -6165,11 +6398,8 @@ int sched_domain_level_max;
 
 static int __init setup_relax_domain_level(char *str)
 {
-       unsigned long val;
-
-       val = simple_strtoul(str, NULL, 0);
-       if (val < sched_domain_level_max)
-               default_relax_domain_level = val;
+       if (kstrtoint(str, 0, &default_relax_domain_level))
+               pr_warn("Unable to set relax_domain_level\n");
 
        return 1;
 }
@@ -6279,14 +6509,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
-static int sched_domains_numa_scale;
 static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
 static inline int sd_local_flags(int level)
 {
-       if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
+       if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
                return 0;
 
        return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
@@ -6344,6 +6573,42 @@ static const struct cpumask *sd_numa_mask(int cpu)
        return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
 }
 
+static void sched_numa_warn(const char *str)
+{
+       static int done = false;
+       int i,j;
+
+       if (done)
+               return;
+
+       done = true;
+
+       printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+       for (i = 0; i < nr_node_ids; i++) {
+               printk(KERN_WARNING "  ");
+               for (j = 0; j < nr_node_ids; j++)
+                       printk(KERN_CONT "%02d ", node_distance(i,j));
+               printk(KERN_CONT "\n");
+       }
+       printk(KERN_WARNING "\n");
+}
+
+static bool find_numa_distance(int distance)
+{
+       int i;
+
+       if (distance == node_distance(0, 0))
+               return true;
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               if (sched_domains_numa_distance[i] == distance)
+                       return true;
+       }
+
+       return false;
+}
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6351,7 +6616,6 @@ static void sched_init_numa(void)
        int level = 0;
        int i, j, k;
 
-       sched_domains_numa_scale = curr_distance;
        sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
        if (!sched_domains_numa_distance)
                return;
@@ -6362,23 +6626,41 @@ static void sched_init_numa(void)
         *
         * Assumes node_distance(0,j) includes all distances in
         * node_distance(i,j) in order to avoid cubic time.
-        *
-        * XXX: could be optimized to O(n log n) by using sort()
         */
        next_distance = curr_distance;
        for (i = 0; i < nr_node_ids; i++) {
                for (j = 0; j < nr_node_ids; j++) {
-                       int distance = node_distance(0, j);
-                       if (distance > curr_distance &&
-                                       (distance < next_distance ||
-                                        next_distance == curr_distance))
-                               next_distance = distance;
+                       for (k = 0; k < nr_node_ids; k++) {
+                               int distance = node_distance(i, k);
+
+                               if (distance > curr_distance &&
+                                   (distance < next_distance ||
+                                    next_distance == curr_distance))
+                                       next_distance = distance;
+
+                               /*
+                                * While not a strong assumption, it would be nice to know
+                                * about cases where node A is connected to B but B is not
+                                * equally connected to A.
+                                */
+                               if (sched_debug() && node_distance(k, i) != distance)
+                                       sched_numa_warn("Node-distance not symmetric");
+
+                               if (sched_debug() && i && !find_numa_distance(distance))
+                                       sched_numa_warn("Node-0 not representative");
+                       }
+                       if (next_distance != curr_distance) {
+                               sched_domains_numa_distance[level++] = next_distance;
+                               sched_domains_numa_levels = level;
+                               curr_distance = next_distance;
+                       } else break;
                }
-               if (next_distance != curr_distance) {
-                       sched_domains_numa_distance[level++] = next_distance;
-                       sched_domains_numa_levels = level;
-                       curr_distance = next_distance;
-               } else break;
+
+               /*
+                * In case of sched_debug() we verify the above assumption.
+                */
+               if (!sched_debug())
+                       break;
        }
        /*
         * 'level' contains the number of unique distances, excluding the
@@ -6403,7 +6685,7 @@ static void sched_init_numa(void)
                        return;
 
                for (j = 0; j < nr_node_ids; j++) {
-                       struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
+                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
                        if (!mask)
                                return;
 
@@ -6490,7 +6772,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
 
-                       sgp = kzalloc_node(sizeof(struct sched_group_power),
+                       sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                        if (!sgp)
                                return -ENOMEM;
@@ -6543,7 +6825,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
        if (!sd)
                return child;
 
-       set_domain_attribute(sd, attr);
        cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
        if (child) {
                sd->level = child->level + 1;
@@ -6551,6 +6832,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                child->parent = sd;
        }
        sd->child = child;
+       set_domain_attribute(sd, attr);
 
        return sd;
 }
@@ -6691,7 +6973,6 @@ static int init_sched_domains(const struct cpumask *cpu_map)
        if (!doms_cur)
                doms_cur = &fallback_doms;
        cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
-       dattr_cur = NULL;
        err = build_sched_domains(doms_cur[0], NULL);
        register_sched_domain_sysctl();
 
index 940e6d17cf96a333fd7ea0c4543e2578effd4638..c099cc6eebe3a6f0cf19dde597c2df583ce57081 100644 (file)
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
        if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, available;
+       u64 total, available, age_stamp, avg;
 
-       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       /*
+        * Since we're reading these variables without serialization make sure
+        * we read them once before doing sanity checks on them.
+        */
+       age_stamp = ACCESS_ONCE(rq->age_stamp);
+       avg = ACCESS_ONCE(rq->rt_avg);
+
+       total = sched_avg_period() + (rq->clock - age_stamp);
 
-       if (unlikely(total < rq->rt_avg)) {
+       if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
                available = 0;
        } else {
-               available = total - rq->rt_avg;
+               available = total - avg;
        }
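
rq->age_stamp and rq->rt_avg are read here without holding the runqueue lock, so the patch snapshots both with ACCESS_ONCE() before the sanity check; otherwise the compiler may legally reload rq->rt_avg between the comparison and the subtraction, and the two reads can disagree. A minimal sketch of the pattern (struct accounting and its fields are invented names):

        /* Illustrative pattern only: read racy fields exactly once. */
        struct accounting {
                u64 total;
                u64 used;
        };

        static u64 headroom(struct accounting *a)
        {
                u64 total = ACCESS_ONCE(a->total);
                u64 used  = ACCESS_ONCE(a->used);

                /* Both the check and the subtraction see the same snapshot. */
                return (total < used) ? 0 : total - used;
        }
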
 
        if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
        power = 0;
 
-       group = child->groups;
-       do {
-               power += group->sgp->power;
-               group = group->next;
-       } while (group != child->groups);
+       if (child->flags & SD_OVERLAP) {
+               /*
+                * SD_OVERLAP domains cannot assume that child groups
+                * span the current group.
+                */
 
-       sdg->sgp->power = power;
+               for_each_cpu(cpu, sched_group_cpus(sdg))
+                       power += power_of(cpu);
+       } else  {
+               /*
+                * !SD_OVERLAP domains can assume that child groups
+                * span the current group.
+                */ 
+
+               group = child->groups;
+               do {
+                       power += group->sgp->power;
+                       group = group->next;
+               } while (group != child->groups);
+       }
+
+       sdg->sgp->power_orig = sdg->sgp->power = power;
 }
 
 /*
@@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
- * @sd: The sched_domain whose statistics are to be updated.
+ * @env: The load balancing environment.
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
@@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        int i;
 
        if (local_group)
-               balance_cpu = group_first_cpu(group);
+               balance_cpu = group_balance_cpu(group);
 
        /* Tally up the load of all CPUs in the group */
        max_cpu_load = 0;
@@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                /* Bias balancing toward cpus of our domain */
                if (local_group) {
-                       if (idle_cpu(i) && !first_idle_cpu) {
+                       if (idle_cpu(i) && !first_idle_cpu &&
+                                       cpumask_test_cpu(i, sched_group_mask(group))) {
                                first_idle_cpu = 1;
                                balance_cpu = i;
                        }
@@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 /**
  * update_sd_pick_busiest - return 1 on busiest group
- * @sd: sched_domain whose statistics are to be checked
+ * @env: The load balancing environment.
  * @sds: sched_domain statistics
  * @sg: sched_group candidate to be checked for being the busiest
  * @sgs: sched_group statistics
- * @this_cpu: the current cpu
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
@@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
- * @sd: sched_domain whose statistics are to be updated.
- * @this_cpu: Cpu for which load balance is currently performed.
- * @idle: Idle status of this_cpu
+ * @env: The load balancing environment.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
@@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * Returns 1 when packing is required and a task should be moved to
  * this CPU.  The amount of the imbalance is returned in *imbalance.
  *
- * @sd: The sched_domain whose packing is to be checked.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: returns amount of imbalanced due to packing.
  */
 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 {
@@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
  * fix_small_imbalance - Calculate the minor imbalance that exists
  *                     amongst the groups of a sched_domain, during
  *                     load balancing.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: Variable to store the imbalance.
  */
 static inline
 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
@@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * Also calculates the amount of weighted load which should be moved
  * to restore balance.
  *
- * @sd: The sched_domain whose busiest group is to be returned.
- * @this_cpu: The cpu for which load balancing is currently being performed.
- * @imbalance: Variable which stores amount of weighted load which should
- *             be moved to restore balance/put a group to idle.
- * @idle: The idle status of this_cpu.
+ * @env: The load balancing environment.
  * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
index b44d604b35d1a8b4164aa2ea4d94d00bf9063824..b6baf370cae973707c24389b30b38e43c76bb4d7 100644 (file)
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        schedstat_inc(rq, sched_goidle);
-       calc_load_account_idle(rq);
        return rq->idle;
 }
 
index c5565c3c515fd2d15dc5ed95f59a970fca087815..573e1ca01102066468e65aee96c57ea30cccd6de 100644 (file)
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total++;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;
 
        update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total--;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;
 
        update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 
        inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
        cpu = task_cpu(p);
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                goto out;
 
        /* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
-           (curr->rt.nr_cpus_allowed < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
-           (p->rt.nr_cpus_allowed > 1)) {
+           (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);
 
                if (target != -1)
@@ -1276,10 +1282,10 @@ out:
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-       if (rq->curr->rt.nr_cpus_allowed == 1)
+       if (rq->curr->nr_cpus_allowed == 1)
                return;
 
-       if (p->rt.nr_cpus_allowed != 1
+       if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
 
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+       if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-           (p->rt.nr_cpus_allowed > 1))
+           (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
 }
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
        if (unlikely(!lowest_mask))
                return -1;
 
-       if (task->rt.nr_cpus_allowed == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     task_running(rq, task) ||
                                     !task->on_rq)) {
 
-                               raw_spin_unlock(&lowest_rq->lock);
+                               double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
                                break;
                        }
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);
 
        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1 &&
+           p->nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
-           (rq->curr->rt.nr_cpus_allowed < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
         * Only update if the process changes its state from whether it
         * can migrate or not.
         */
-       if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+       if ((p->nr_cpus_allowed > 1) == (weight > 1))
                return;
 
        rq = task_rq(p);
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
+       struct sched_rt_entity *rt_se = &p->rt;
+
        update_curr_rt(rq);
 
        watchdog(rq, p);
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        p->rt.time_slice = RR_TIMESLICE;
 
        /*
-        * Requeue to the end of queue if we are not the only element
-        * on the queue:
+        * Requeue to the end of queue if we (and all of our ancestors) are not
+        * the only element on the queue
         */
-       if (p->rt.run_list.prev != p->rt.run_list.next) {
-               requeue_task_rt(rq, p, 0);
-               set_tsk_need_resched(p);
+       for_each_sched_rt_entity(rt_se) {
+               if (rt_se->run_list.prev != rt_se->run_list.next) {
+                       requeue_task_rt(rq, p, 0);
+                       set_tsk_need_resched(p);
+                       return;
+               }
        }
 }
 
index ba9dccfd24ce95b2b198257fb5e787e21da36d41..55844f24435a0ad5c1c50561a54424ce4b25992d 100644 (file)
@@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+extern int group_balance_cpu(struct sched_group *sg);
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -940,8 +942,6 @@ static inline u64 sched_avg_period(void)
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
-void calc_load_account_idle(struct rq *this_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index f7b4182176331c2f3c667117fa60d48040d44a9e..677102789cf22d4847936782f6c6f67085421927 100644 (file)
@@ -1656,19 +1656,18 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        info.si_signo = sig;
        info.si_errno = 0;
        /*
-        * we are under tasklist_lock here so our parent is tied to
-        * us and cannot exit and release its namespace.
+        * We are under tasklist_lock here so our parent is tied to
+        * us and cannot change.
         *
-        * the only it can is to switch its nsproxy with sys_unshare,
-        * bu uncharing pid namespaces is not allowed, so we'll always
-        * see relevant namespace
+        * task_active_pid_ns will always return the same pid namespace
+        * until a task passes through release_task.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, this is not
         * correct to rely on this
         */
        rcu_read_lock();
-       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
                                       task_uid(tsk));
        rcu_read_unlock();
@@ -2369,24 +2368,34 @@ relock:
 }
 
 /**
- * block_sigmask - add @ka's signal mask to current->blocked
- * @ka: action for @signr
- * @signr: signal that has been successfully delivered
+ * signal_delivered - 
+ * @sig:               number of signal being delivered
+ * @info:              siginfo_t of signal being delivered
+ * @ka:                        sigaction setting that chose the handler
+ * @regs:              user register state
+ * @stepping:          nonzero if debugger single-step or block-step in use
  *
  * This function should be called when a signal has successfully been
- * delivered. It adds the mask of signals for @ka to current->blocked
- * so that they are blocked during the execution of the signal
- * handler. In addition, @signr will be blocked unless %SA_NODEFER is
- * set in @ka->sa.sa_flags.
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags).  Tracing is notified.
  */
-void block_sigmask(struct k_sigaction *ka, int signr)
+void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
+                       struct pt_regs *regs, int stepping)
 {
        sigset_t blocked;
 
+       /* A signal was successfully delivered, and the
+          saved sigmask was stored on the signal frame,
+          and will be restored by sigreturn.  So we can
+          simply clear the restore sigmask flag.  */
+       clear_restore_sigmask();
+
        sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
-               sigaddset(&blocked, signr);
+               sigaddset(&blocked, sig);
        set_current_blocked(&blocked);
+       tracehook_signal_handler(sig, info, ka, regs, stepping);
 }
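
signal_delivered() folds into one helper what architectures previously did by hand after block_sigmask(): clear the restore-sigmask flag (the saved mask now lives on the signal frame), block @ka->sa.sa_mask plus the delivered signal unless SA_NODEFER is set, and notify tracing. A hedged sketch of how an architecture's handle_signal() could end up looking (setup_rt_frame() and TIF_SINGLESTEP stand in for the architecture's own frame setup and single-step flag; sigmask_to_save() comes from the same signal series; none of this is taken verbatim from the diff):

        static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
                                  struct pt_regs *regs)
        {
                sigset_t *oldset = sigmask_to_save();

                if (setup_rt_frame(sig, ka, info, oldset, regs) < 0) {
                        force_sigsegv(sig, current);
                        return;
                }

                /* Replaces block_sigmask() + clear_restore_sigmask()
                 * + tracehook_signal_handler() in one call. */
                signal_delivered(sig, info, ka, regs,
                                 test_thread_flag(TIF_SINGLESTEP));
        }
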
 
 /*
@@ -2519,7 +2528,16 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
  * It is wrong to change ->blocked directly, this helper should be used
  * to ensure the process can't miss a shared signal we are going to block.
  */
-void set_current_blocked(const sigset_t *newset)
+void set_current_blocked(sigset_t *newset)
+{
+       struct task_struct *tsk = current;
+       sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
+       spin_lock_irq(&tsk->sighand->siglock);
+       __set_task_blocked(tsk, newset);
+       spin_unlock_irq(&tsk->sighand->siglock);
+}
+
+void __set_current_blocked(const sigset_t *newset)
 {
        struct task_struct *tsk = current;
 
@@ -2559,7 +2577,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
                return -EINVAL;
        }
 
-       set_current_blocked(&newset);
+       __set_current_blocked(&newset);
        return 0;
 }
 
@@ -3133,7 +3151,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
                        return -EINVAL;
                }
 
-               set_current_blocked(&new_blocked);
+               __set_current_blocked(&new_blocked);
        }
 
        if (oset) {
@@ -3197,7 +3215,6 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
        int old = current->blocked.sig[0];
        sigset_t newset;
 
-       siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
        set_current_blocked(&newset);
 
        return old;
@@ -3236,11 +3253,8 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 int sigsuspend(sigset_t *set)
 {
-       sigdelsetmask(set, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);
 
@@ -3249,7 +3263,6 @@ int sigsuspend(sigset_t *set)
        set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
-#endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
 /**
index e1a797e028a320d62c01e77a47f533e0998adaa3..98f60c5caa1bef91733a4c0bcc646da4fa0b7a24 100644 (file)
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
        per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
+/**
+ * idle_init - Initialize the idle thread for a cpu
+ * @cpu:       The cpu for which the idle thread should be initialized
+ *
+ * Creates the thread if it does not exist.
+ */
 static inline void idle_init(unsigned int cpu)
 {
        struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
 }
 
 /**
- * idle_thread_init - Initialize the idle thread for a cpu
- * @cpu:       The cpu for which the idle thread should be initialized
- *
- * Creates the thread if it does not exist.
+ * idle_threads_init - Initialize idle threads for all cpus
  */
 void __init idle_threads_init(void)
 {
-       unsigned int cpu;
+       unsigned int cpu, boot_cpu;
+
+       boot_cpu = smp_processor_id();
 
        for_each_possible_cpu(cpu) {
-               if (cpu != smp_processor_id())
+               if (cpu != boot_cpu)
                        idle_init(cpu);
        }
 }
index 6df42624e454aeb236ab1c9413d6fdf5f676365f..2d39a84cd8575e6295f666421cee01b41652b3b1 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/personality.h>
 #include <linux/ptrace.h>
 #include <linux/fs_struct.h>
+#include <linux/file.h>
+#include <linux/mount.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
@@ -1378,8 +1380,8 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
                memcpy(u->nodename, tmp, len);
                memset(u->nodename + len, 0, sizeof(u->nodename) - len);
                errno = 0;
+               uts_proc_notify(UTS_PROC_HOSTNAME);
        }
-       uts_proc_notify(UTS_PROC_HOSTNAME);
        up_write(&uts_sem);
        return errno;
 }
@@ -1429,8 +1431,8 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
                memcpy(u->domainname, tmp, len);
                memset(u->domainname + len, 0, sizeof(u->domainname) - len);
                errno = 0;
+               uts_proc_notify(UTS_PROC_DOMAINNAME);
        }
-       uts_proc_notify(UTS_PROC_DOMAINNAME);
        up_write(&uts_sem);
        return errno;
 }
@@ -1784,77 +1786,105 @@ SYSCALL_DEFINE1(umask, int, mask)
 }
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
+{
+       struct file *exe_file;
+       struct dentry *dentry;
+       int err;
+
+       exe_file = fget(fd);
+       if (!exe_file)
+               return -EBADF;
+
+       dentry = exe_file->f_path.dentry;
+
+       /*
+        * Because the original mm->exe_file points to executable file, make
+        * sure that this one is executable as well, to avoid breaking an
+        * overall picture.
+        */
+       err = -EACCES;
+       if (!S_ISREG(dentry->d_inode->i_mode)   ||
+           exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+               goto exit;
+
+       err = inode_permission(dentry->d_inode, MAY_EXEC);
+       if (err)
+               goto exit;
+
+       down_write(&mm->mmap_sem);
+
+       /*
+        * Forbid mm->exe_file change if old file still mapped.
+        */
+       err = -EBUSY;
+       if (mm->exe_file) {
+               struct vm_area_struct *vma;
+
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if (vma->vm_file &&
+                           path_equal(&vma->vm_file->f_path,
+                                      &mm->exe_file->f_path))
+                               goto exit_unlock;
+       }
+
+       /*
+        * The symlink can be changed only once, just to disallow arbitrary
+        * transitions malicious software might bring in. This means one
+        * could make a snapshot over all processes running and monitor
+        * /proc/pid/exe changes to notice unusual activity if needed.
+        */
+       err = -EPERM;
+       if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
+               goto exit_unlock;
+
+       err = 0;
+       set_mm_exe_file(mm, exe_file);
+exit_unlock:
+       up_write(&mm->mmap_sem);
+
+exit:
+       fput(exe_file);
+       return err;
+}
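
prctl_set_mm_exe_file() lets a checkpoint/restore helper repoint /proc/<pid>/exe: the descriptor must name a regular, executable file on a mount without MNT_NOEXEC, the old image must no longer be mapped, and MMF_EXE_FILE_CHANGED limits the switch to a single transition. A hedged user-space sketch of the call (the path is a placeholder; the constants match include/linux/prctl.h of this series; CONFIG_CHECKPOINT_RESTORE and CAP_SYS_RESOURCE are required):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_SET_MM
        # define PR_SET_MM              35
        #endif
        #ifndef PR_SET_MM_EXE_FILE
        # define PR_SET_MM_EXE_FILE     13
        #endif

        int main(void)
        {
                /* Placeholder path: any regular, executable file on a non-noexec mount. */
                int fd = open("/path/to/restored/binary", O_RDONLY);

                if (fd < 0 || prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0))
                        perror("PR_SET_MM_EXE_FILE");
                return 0;
        }
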
+
 static int prctl_set_mm(int opt, unsigned long addr,
                        unsigned long arg4, unsigned long arg5)
 {
        unsigned long rlim = rlimit(RLIMIT_DATA);
-       unsigned long vm_req_flags;
-       unsigned long vm_bad_flags;
-       struct vm_area_struct *vma;
-       int error = 0;
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int error;
 
-       if (arg4 | arg5)
+       if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
                return -EINVAL;
 
        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;
 
-       if (addr >= TASK_SIZE)
+       if (opt == PR_SET_MM_EXE_FILE)
+               return prctl_set_mm_exe_file(mm, (unsigned int)addr);
+
+       if (addr >= TASK_SIZE || addr < mmap_min_addr)
                return -EINVAL;
 
+       error = -EINVAL;
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
 
-       if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
-               /* It must be existing VMA */
-               if (!vma || vma->vm_start > addr)
-                       goto out;
-       }
-
-       error = -EINVAL;
        switch (opt) {
        case PR_SET_MM_START_CODE:
+               mm->start_code = addr;
+               break;
        case PR_SET_MM_END_CODE:
-               vm_req_flags = VM_READ | VM_EXEC;
-               vm_bad_flags = VM_WRITE | VM_MAYSHARE;
-
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-                   (vma->vm_flags & vm_bad_flags))
-                       goto out;
-
-               if (opt == PR_SET_MM_START_CODE)
-                       mm->start_code = addr;
-               else
-                       mm->end_code = addr;
+               mm->end_code = addr;
                break;
-
        case PR_SET_MM_START_DATA:
-       case PR_SET_MM_END_DATA:
-               vm_req_flags = VM_READ | VM_WRITE;
-               vm_bad_flags = VM_EXEC | VM_MAYSHARE;
-
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-                   (vma->vm_flags & vm_bad_flags))
-                       goto out;
-
-               if (opt == PR_SET_MM_START_DATA)
-                       mm->start_data = addr;
-               else
-                       mm->end_data = addr;
+               mm->start_data = addr;
                break;
-
-       case PR_SET_MM_START_STACK:
-
-#ifdef CONFIG_STACK_GROWSUP
-               vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
-#else
-               vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
-#endif
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
-                       goto out;
-
-               mm->start_stack = addr;
+       case PR_SET_MM_END_DATA:
+               mm->end_data = addr;
                break;
 
        case PR_SET_MM_START_BRK:
@@ -1881,24 +1911,89 @@ static int prctl_set_mm(int opt, unsigned long addr,
                mm->brk = addr;
                break;
 
+       /*
+        * If command line arguments and environment
+        * are placed somewhere else on stack, we can
+        * set them up here, ARG_START/END to setup
+        * command line arguments and ENV_START/END
+        * for environment.
+        */
+       case PR_SET_MM_START_STACK:
+       case PR_SET_MM_ARG_START:
+       case PR_SET_MM_ARG_END:
+       case PR_SET_MM_ENV_START:
+       case PR_SET_MM_ENV_END:
+               if (!vma) {
+                       error = -EFAULT;
+                       goto out;
+               }
+               if (opt == PR_SET_MM_START_STACK)
+                       mm->start_stack = addr;
+               else if (opt == PR_SET_MM_ARG_START)
+                       mm->arg_start = addr;
+               else if (opt == PR_SET_MM_ARG_END)
+                       mm->arg_end = addr;
+               else if (opt == PR_SET_MM_ENV_START)
+                       mm->env_start = addr;
+               else if (opt == PR_SET_MM_ENV_END)
+                       mm->env_end = addr;
+               break;
+
+       /*
+        * This doesn't move auxiliary vector itself
+        * since it's pinned to mm_struct, but allow
+        * to fill vector with new values. It's up
+        * to a caller to provide sane values here
+        * otherwise user space tools which use this
+        * vector might be unhappy.
+        */
+       case PR_SET_MM_AUXV: {
+               unsigned long user_auxv[AT_VECTOR_SIZE];
+
+               if (arg4 > sizeof(user_auxv))
+                       goto out;
+               up_read(&mm->mmap_sem);
+
+               if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
+                       return -EFAULT;
+
+               /* Make sure the last entry is always AT_NULL */
+               user_auxv[AT_VECTOR_SIZE - 2] = 0;
+               user_auxv[AT_VECTOR_SIZE - 1] = 0;
+
+               BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
+
+               task_lock(current);
+               memcpy(mm->saved_auxv, user_auxv, arg4);
+               task_unlock(current);
+
+               return 0;
+       }
        default:
-               error = -EINVAL;
                goto out;
        }
 
        error = 0;
-
 out:
        up_read(&mm->mmap_sem);
-
        return error;
 }
+
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+       return put_user(me->clear_child_tid, tid_addr);
+}
+
 #else /* CONFIG_CHECKPOINT_RESTORE */
 static int prctl_set_mm(int opt, unsigned long addr,
                        unsigned long arg4, unsigned long arg5)
 {
        return -EINVAL;
 }
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+       return -EINVAL;
+}
 #endif
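
The rest of the prctl_set_mm() rework adds ARG_START/ARG_END, ENV_START/ENV_END and AUXV so a restored process can make /proc/<pid>/cmdline and /proc/<pid>/environ point at relocated stack data, and prctl_get_tid_address() (wired into the prctl() switch in a later hunk) exposes the clear_child_tid pointer registered via set_tid_address(). A hedged user-space sketch (the buffer and values are placeholders; same configuration and capability requirements as above):

        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_SET_MM
        # define PR_SET_MM              35
        #endif
        #ifndef PR_SET_MM_ARG_START
        # define PR_SET_MM_ARG_START    8
        # define PR_SET_MM_ARG_END      9
        #endif
        #ifndef PR_GET_TID_ADDRESS
        # define PR_GET_TID_ADDRESS     40
        #endif

        int main(void)
        {
                static char args[] = "restored\0--example\0";   /* placeholder buffer */
                int *tid_addr = NULL;

                /* Point /proc/self/cmdline at our own buffer. */
                if (prctl(PR_SET_MM, PR_SET_MM_ARG_START, (unsigned long)args, 0, 0) ||
                    prctl(PR_SET_MM, PR_SET_MM_ARG_END,
                          (unsigned long)(args + sizeof(args)), 0, 0))
                        perror("PR_SET_MM_ARG_*");

                /* Read back the address registered with set_tid_address(2). */
                if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0))
                        perror("PR_GET_TID_ADDRESS");
                else
                        printf("clear_child_tid at %p\n", (void *)tid_addr);
                return 0;
        }
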
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2053,6 +2148,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                case PR_SET_MM:
                        error = prctl_set_mm(arg2, arg3, arg4, arg5);
                        break;
+               case PR_GET_TID_ADDRESS:
+                       error = prctl_get_tid_address(me, (int __user **)arg2);
+                       break;
                case PR_SET_CHILD_SUBREAPER:
                        me->signal->is_child_subreaper = !!arg2;
                        error = 0;
@@ -2114,7 +2212,6 @@ int orderly_poweroff(bool force)
                NULL
        };
        int ret = -ENOMEM;
-       struct subprocess_info *info;
 
        if (argv == NULL) {
                printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
@@ -2122,18 +2219,16 @@ int orderly_poweroff(bool force)
                goto out;
        }
 
-       info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
-       if (info == NULL) {
-               argv_free(argv);
-               goto out;
-       }
-
-       call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
+       ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
+                                     NULL, argv_cleanup, NULL);
+out:
+       if (likely(!ret))
+               return 0;
 
-       ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+       if (ret == -ENOMEM)
+               argv_free(argv);
 
-  out:
-       if (ret && force) {
+       if (force) {
                printk(KERN_WARNING "Failed to start orderly shutdown: "
                       "forcing the issue\n");
 
index 47bfa16430d7dc764c17a06f4c40dd142ef6a88a..dbff751e408647badd0d7e92b935962bfc3ef8e2 100644 (file)
@@ -203,3 +203,6 @@ cond_syscall(sys_fanotify_mark);
 cond_syscall(sys_name_to_handle_at);
 cond_syscall(sys_open_by_handle_at);
 cond_syscall(compat_sys_open_by_handle_at);
+
+/* compare kernel pointers */
+cond_syscall(sys_kcmp);
diff --git a/kernel/task_work.c b/kernel/task_work.c
new file mode 100644 (file)
index 0000000..82d1c79
--- /dev/null
@@ -0,0 +1,84 @@
+#include <linux/spinlock.h>
+#include <linux/task_work.h>
+#include <linux/tracehook.h>
+
+int
+task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
+{
+       unsigned long flags;
+       int err = -ESRCH;
+
+#ifndef TIF_NOTIFY_RESUME
+       if (notify)
+               return -ENOTSUPP;
+#endif
+       /*
+        * We must not insert the new work if the task has already passed
+        * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
+        * and check PF_EXITING under pi_lock.
+        */
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       if (likely(!(task->flags & PF_EXITING))) {
+               hlist_add_head(&twork->hlist, &task->task_works);
+               err = 0;
+       }
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
+       if (likely(!err) && notify)
+               set_notify_resume(task);
+       return err;
+}
+
+struct task_work *
+task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+       unsigned long flags;
+       struct task_work *twork;
+       struct hlist_node *pos;
+
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
+               if (twork->func == func) {
+                       hlist_del(&twork->hlist);
+                       goto found;
+               }
+       }
+       twork = NULL;
+ found:
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       return twork;
+}
+
+void task_work_run(void)
+{
+       struct task_struct *task = current;
+       struct hlist_head task_works;
+       struct hlist_node *pos;
+
+       raw_spin_lock_irq(&task->pi_lock);
+       hlist_move_list(&task->task_works, &task_works);
+       raw_spin_unlock_irq(&task->pi_lock);
+
+       if (unlikely(hlist_empty(&task_works)))
+               return;
+       /*
+        * We use hlist to save the space in task_struct, but we want fifo.
+        * Find the last entry, the list should be short, then process them
+        * in reverse order.
+        */
+       for (pos = task_works.first; pos->next; pos = pos->next)
+               ;
+
+       for (;;) {
+               struct hlist_node **pprev = pos->pprev;
+               struct task_work *twork = container_of(pos, struct task_work,
+                                                       hlist);
+               twork->func(twork);
+
+               if (pprev == &task_works.first)
+                       break;
+               pos = container_of(pprev, struct hlist_node, next);
+       }
+}
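
The new kernel/task_work.c lets kernel code queue a callback that the target task itself runs, in process context, the next time it returns to user space (or from the exit path): task_work_add() refuses tasks that have already passed exit_task_work(), task_work_cancel() removes a pending work by function pointer, and task_work_run() executes the queued works in FIFO order. A hedged sketch of a caller against the API above (struct my_ctx, my_func() and queue_on() are illustrative names only; only the task_work fields visible in this file are used):

        struct my_ctx {
                struct task_work        work;
                int                     value;
        };

        static void my_func(struct task_work *twork)
        {
                struct my_ctx *ctx = container_of(twork, struct my_ctx, work);

                pr_info("ran in pid %d, value %d\n", current->pid, ctx->value);
                kfree(ctx);
        }

        static int queue_on(struct task_struct *task)
        {
                struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
                int err;

                if (!ctx)
                        return -ENOMEM;
                ctx->value = 42;
                ctx->work.func = my_func;
                /* true: also set TIF_NOTIFY_RESUME so the task runs it soon. */
                err = task_work_add(task, &ctx->work, true);
                if (err)        /* -ESRCH: task already past exit_task_work() */
                        kfree(ctx);
                return err;
        }
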
index 9cd928f7a7c6a18e8188988a3ed7cfcfec8c808e..7e1ce012a851351c10853fe732e714afa2bb8a5c 100644 (file)
@@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev)
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
-static void clockevents_config(struct clock_event_device *dev,
-                              u32 freq)
+void clockevents_config(struct clock_event_device *dev, u32 freq)
 {
        u64 sec;
 
index 70b33abcc7bb0e92762b05af6c6ceaa1be75cd38..b7fbadc5c973c928e531cf6d701f1520e4809812 100644 (file)
@@ -409,7 +409,9 @@ int second_overflow(unsigned long secs)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
-               if (secs % 86400 == 0) {
+               if (!(time_status & STA_INS))
+                       time_state = TIME_OK;
+               else if (secs % 86400 == 0) {
                        leap = -1;
                        time_state = TIME_OOP;
                        time_tai++;
@@ -418,7 +420,9 @@ int second_overflow(unsigned long secs)
                }
                break;
        case TIME_DEL:
-               if ((secs + 1) % 86400 == 0) {
+               if (!(time_status & STA_DEL))
+                       time_state = TIME_OK;
+               else if ((secs + 1) % 86400 == 0) {
                        leap = 1;
                        time_tai--;
                        time_state = TIME_WAIT;
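
With this change second_overflow() re-checks STA_INS/STA_DEL each time the state machine runs, so clearing the bit through adjtimex() before midnight actually cancels a pending leap second instead of leaving the machine armed in TIME_INS/TIME_DEL. A hedged user-space sketch of arming the bit (normally ntpd/chronyd does this; CAP_SYS_TIME is required):

        #include <stdio.h>
        #include <sys/timex.h>

        int main(void)
        {
                struct timex tx = {
                        .modes  = ADJ_STATUS,
                        .status = STA_INS,      /* arm insertion at the next UTC midnight */
                };

                if (adjtimex(&tx) < 0)
                        perror("adjtimex");
                /* Cancelling works the same way: ADJ_STATUS with STA_INS cleared,
                 * which the patched second_overflow() now honours. */
                return 0;
        }
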
index 6a3a5b9ff56176c2951256edf7ef9601a0f49106..4a08472c3ca71399edc34ffafa9f5394f25597e7 100644 (file)
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+       unsigned long rcu_delta_jiffies;
        ktime_t last_update, expires, now;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        u64 time_delta;
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                time_delta = timekeeping_max_deferment();
        } while (read_seqretry(&xtime_lock, seq));
 
-       if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+       if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
            arch_needs_cpu(cpu)) {
                next_jiffies = last_jiffies + 1;
                delta_jiffies = 1;
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                /* Get the next timer wheel timer */
                next_jiffies = get_next_timer_interrupt(last_jiffies);
                delta_jiffies = next_jiffies - last_jiffies;
+               if (rcu_delta_jiffies < delta_jiffies) {
+                       next_jiffies = last_jiffies + rcu_delta_jiffies;
+                       delta_jiffies = rcu_delta_jiffies;
+               }
        }
        /*
         * Do not stop the tick, if we are only one off
@@ -401,6 +406,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                 */
                if (!ts->tick_stopped) {
                        select_nohz_load_balancer(1);
+                       calc_load_enter_idle();
 
                        ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
@@ -576,6 +582,7 @@ void tick_nohz_idle_exit(void)
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
+       update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
@@ -591,6 +598,7 @@ void tick_nohz_idle_exit(void)
                account_idle_ticks(ticks);
 #endif
 
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
@@ -814,6 +822,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
        return HRTIMER_RESTART;
 }
 
+static int sched_skew_tick;
+
+static int __init skew_tick(char *str)
+{
+       get_option(&str, &sched_skew_tick);
+
+       return 0;
+}
+early_param("skew_tick", skew_tick);
+
 /**
  * tick_setup_sched_timer - setup the tick emulation timer
  */
@@ -831,6 +849,14 @@ void tick_setup_sched_timer(void)
        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
+       /* Offset the tick to avert xtime_lock contention. */
+       if (sched_skew_tick) {
+               u64 offset = ktime_to_ns(tick_period) >> 1;
+               do_div(offset, num_possible_cpus());
+               offset *= smp_processor_id();
+               hrtimer_add_expires_ns(&ts->sched_timer, offset);
+       }
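
The skew_tick=1 boot parameter spreads the per-CPU tick hrtimers across half a tick period, offset = (tick_period / 2 / num_possible_cpus()) * cpu, so large machines do not all contend on xtime_lock at the same instant. A small stand-alone sketch of the arithmetic with example values (HZ=250 and 8 CPUs are assumptions, not from the diff):

        #include <stdio.h>

        int main(void)
        {
                const unsigned long long tick_period_ns = 1000000000ULL / 250; /* HZ=250 -> 4 ms */
                const unsigned int cpus = 8;                                   /* example box   */

                for (unsigned int cpu = 0; cpu < cpus; cpu++) {
                        unsigned long long offset = (tick_period_ns >> 1) / cpus * cpu;
                        printf("cpu%u tick offset: %llu us\n", cpu, offset / 1000);
                }
                /* cpu0 0us, cpu1 250us, ... cpu7 1750us: ticks spread over half a period. */
                return 0;
        }
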
+
        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start_expires(&ts->sched_timer,
index 6e46cacf5969c8290a7933a548b713a227b2e183..3447cfaf11e7d61b11d30bc063967e47d85e59d4 100644 (file)
@@ -70,6 +70,12 @@ struct timekeeper {
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;
 
+       /* Offset clock monotonic -> clock realtime */
+       ktime_t offs_real;
+
+       /* Offset clock monotonic -> clock boottime */
+       ktime_t offs_boot;
+
        /* Seqlock for all timekeeper values */
        seqlock_t lock;
 };
@@ -172,6 +178,14 @@ static inline s64 timekeeping_get_ns_raw(void)
        return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
+static void update_rt_offset(void)
+{
+       struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;
+
+       set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+       timekeeper.offs_real = timespec_to_ktime(tmp);
+}
+
 /* must hold write on timekeeper.lock */
 static void timekeeping_update(bool clearntp)
 {
@@ -179,6 +193,7 @@ static void timekeeping_update(bool clearntp)
                timekeeper.ntp_error = 0;
                ntp_clear();
        }
+       update_rt_offset();
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                         timekeeper.clock, timekeeper.mult);
 }
@@ -604,6 +619,7 @@ void __init timekeeping_init(void)
        }
        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
+       update_rt_offset();
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +628,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+       timekeeper.total_sleep_time = t;
+       timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -630,8 +652,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
        timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *delta);
-       timekeeper.total_sleep_time = timespec_add(
-                                       timekeeper.total_sleep_time, *delta);
+       update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
 }
 
 
@@ -696,6 +717,7 @@ static void timekeeping_resume(void)
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
+       timekeeping_update(false);
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
        touch_softlockup_watchdog();
@@ -962,6 +984,9 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
                timekeeper.xtime.tv_sec++;
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
+               timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        /* Accumulate raw time */
@@ -1077,6 +1102,9 @@ static void update_wall_time(void)
                timekeeper.xtime.tv_sec++;
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
+               timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        timekeeping_update(false);
@@ -1244,6 +1272,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
        } while (read_seqretry(&timekeeper.lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interrupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+       ktime_t now;
+       unsigned int seq;
+       u64 secs, nsecs;
+
+       do {
+               seq = read_seqbegin(&timekeeper.lock);
+
+               secs = timekeeper.xtime.tv_sec;
+               nsecs = timekeeper.xtime.tv_nsec;
+               nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
+
+               *offs_real = timekeeper.offs_real;
+               *offs_boot = timekeeper.offs_boot;
+       } while (read_seqretry(&timekeeper.lock, seq));
+
+       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+       now = ktime_sub(now, *offs_real);
+       return now;
+}
+#endif
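
offs_real and offs_boot cache the monotonic-to-realtime and monotonic-to-boottime offsets inside the timekeeper, and ktime_get_update_offsets() hands them out together with the current monotonic time under one seqlock read; the leap-second paths above rely on this via clock_was_set_delayed() to refresh the hrtimer bases from a safe context. A hedged sketch of a consumer that derives the other clock bases from a single call (refresh_bases() is an invented helper, not how the hrtimer code is actually structured):

        /* Illustrative only: derive the realtime and boottime views from one
         * locked read of the timekeeper. */
        static void refresh_bases(ktime_t *now_mono, ktime_t *now_real,
                                  ktime_t *now_boot)
        {
                ktime_t offs_real, offs_boot;

                *now_mono = ktime_get_update_offsets(&offs_real, &offs_boot);
                *now_real = ktime_add(*now_mono, offs_real);    /* CLOCK_REALTIME */
                *now_boot = ktime_add(*now_mono, offs_boot);    /* CLOCK_BOOTTIME */
        }
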
+
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
index 6420cda62336c1194d02c86203a91e18766a037b..f765465bffe47968752e272308d7fb5b63093a65 100644 (file)
@@ -1075,6 +1075,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
 
        ret = rb_allocate_pages(cpu_buffer, nr_pages);
        if (ret < 0)
@@ -1346,10 +1347,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                         * If something was added to this page, it was full
                         * since it is not the tail page. So we deduct the
                         * bytes consumed in ring buffer from here.
-                        * No need to update overruns, since this page is
-                        * deleted from ring buffer and its entries are
-                        * already accounted for.
+                        * Increment overrun to account for the lost events.
                         */
+                       local_add(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                }
 
@@ -1486,6 +1486,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
        if (!buffer)
                return size;
 
+       /* Make sure the requested buffer exists */
+       if (cpu_id != RING_BUFFER_ALL_CPUS &&
+           !cpumask_test_cpu(cpu_id, buffer->cpumask))
+               return size;
+
        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        size *= BUF_PAGE_SIZE;
 
index 68032c6177dbb84a28d0bc432773f841101c11b4..a7fa0702be1cd5b269ed6f15e9624f4b02968b11 100644 (file)
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 void tracing_off(void)
 {
        if (global_trace.buffer)
-               ring_buffer_record_on(global_trace.buffer);
+               ring_buffer_record_off(global_trace.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index e5e1d85b8c7c23090ce59b7e5b1e868535c85ef0..4b1dfba70f7cf8ae7397623656a9b695028f702a 100644 (file)
@@ -372,6 +372,13 @@ static int watchdog(void *unused)
 
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
 static int watchdog_nmi_enable(int cpu)
 {
        struct perf_event_attr *wd_attr;
@@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu)
 
        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+       /* save cpu0 error for future comparison */
+       if (cpu == 0 && IS_ERR(event))
+               cpu0_err = PTR_ERR(event);
+
        if (!IS_ERR(event)) {
-               pr_info("enabled, takes one hw-pmu counter.\n");
+               /* only print for cpu0 or different than cpu0 */
+               if (cpu == 0 || cpu0_err)
+                       pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }
 
+       /* skip displaying the same error again */
+       if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+               return PTR_ERR(event);
 
        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
index a42d3ae39648386b81a649e9844afd4e0b03bca9..ff5bdee4716d5206c7f16f4ce67e911f668c97e7 100644 (file)
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
        default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
        default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
+config PANIC_ON_OOPS
+       bool "Panic on Oops" if EXPERT
+       default n
+       help
+         Say Y here to enable the kernel to panic when it oopses. This
+         has the same effect as setting oops=panic on the kernel command
+         line.
+
+         This feature is useful to ensure that the kernel does not do
+         anything erroneous after an oops which could result in data
+         corruption or other issues.
+
+         Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+       int
+       range 0 1
+       default 0 if !PANIC_ON_OOPS
+       default 1 if PANIC_ON_OOPS
+
 config DETECT_HUNG_TASK
        bool "Detect Hung Tasks"
        depends on DEBUG_KERNEL
index e5ec1e9c1aa52cc08c710a4dcc4c1815cad9c67b..f9a484676cb6a8e4782333ce164bf0662b583b78 100644 (file)
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 
        if (head->height == 0)
                return NULL;
-retry:
        longcpy(key, __key, geo->keylen);
+retry:
        dec_key(geo, key);
 
        node = head->node;
@@ -351,7 +351,7 @@ retry:
        }
 miss:
        if (retry_key) {
-               __key = retry_key;
+               longcpy(key, retry_key, geo->keylen);
                retry_key = NULL;
                goto retry;
        }
@@ -509,6 +509,7 @@ retry:
 int btree_insert(struct btree_head *head, struct btree_geo *geo,
                unsigned long *key, void *val, gfp_t gfp)
 {
+       BUG_ON(!val);
        return btree_insert_level(head, geo, key, val, 1, gfp);
 }
 EXPORT_SYMBOL_GPL(btree_insert);
index 518aea714d21d9dea1c7d52b0cf6878482ca5d82..66ce414891330964aacb48dee8079138034cad3d 100644 (file)
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
 
        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
-                       (u32 *)&global_disable);
+                       &global_disable);
        if (!global_disable_dent)
                goto out_err;
 
index 6ab4587d052b5994f3be952b732bfc5a613664f9..0777c5a45fa04f3464100dfdf24dbee15aecfd98 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/dynamic_queue_limits.h>
 
-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
 
 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
        unsigned int inprogress, prev_inprogress, limit;
-       unsigned int ovlimit, all_prev_completed, completed;
+       unsigned int ovlimit, completed, num_queued;
+       bool all_prev_completed;
+
+       num_queued = ACCESS_ONCE(dql->num_queued);
 
        /* Can't complete more than what's in queue */
-       BUG_ON(count > dql->num_queued - dql->num_completed);
+       BUG_ON(count > num_queued - dql->num_completed);
 
        completed = dql->num_completed + count;
        limit = dql->limit;
-       ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
-       inprogress = dql->num_queued - completed;
+       ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+       inprogress = num_queued - completed;
        prev_inprogress = dql->prev_num_queued - dql->num_completed;
-       all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+       all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
 
        if ((ovlimit && !inprogress) ||
            (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count)
        dql->prev_ovlimit = ovlimit;
        dql->prev_last_obj_cnt = dql->last_obj_cnt;
        dql->num_completed = completed;
-       dql->prev_num_queued = dql->num_queued;
+       dql->prev_num_queued = num_queued;
 }
 EXPORT_SYMBOL(dql_completed);
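
The dql counters are free-running unsigned values that are allowed to wrap, so the new AFTER_EQ()/POSDIFF() forms compare them through a signed difference, the same trick as the kernel's time_after(); a plain "completed >= prev_num_queued" misjudges the ordering once a counter wraps past zero. A small stand-alone demonstration (values chosen around the 32-bit wrap point):

        #include <stdio.h>

        #define POSDIFF(A, B)  ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
        #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)

        int main(void)
        {
                unsigned int prev_queued = 0xfffffff0u; /* just before wrap      */
                unsigned int completed   = 0x00000010u; /* logically 0x20 later  */

                /* The naive comparison thinks completed is behind prev_queued... */
                printf("naive:    %d\n", completed >= prev_queued);          /* 0 (wrong) */
                /* ...while the signed-difference form handles the wrap. */
                printf("AFTER_EQ: %d\n", AFTER_EQ(completed, prev_queued));  /* 1 (right) */
                printf("POSDIFF:  %u\n", POSDIFF(completed, prev_queued));   /* 32        */
                return 0;
        }
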
 
index 6805453c18e78a9d51592cb8ff3313fdbd3acc9a..f7210ad6cffd44c2486f8fef542dc5fa5affdae3 100644 (file)
@@ -101,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
 
 bool should_fail(struct fault_attr *attr, ssize_t size)
 {
+       /* No need to check any other properties if the probability is 0 */
+       if (attr->probability == 0)
+               return false;
+
        if (attr->task_filter && !fail_task(attr, current))
                return false;
 
index d7c878cc006cf62ac2b0b0abb1a3a69fa62c64e2..e7964296fd50551020872d430009e890d0e97b5d 100644 (file)
@@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
         * during iterating; it can be zero only at the beginning.
         * And we cannot overflow iter->next_index in a single step,
         * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+        *
+        * This condition is also used by radix_tree_next_slot() to stop
+        * contiguous iterating and forbid switching to the next chunk.
         */
        index = iter->next_index;
        if (!index && iter->index)
index 1805a5cc5daaac68a08cd3f3cdd3e7d4cc5b5bad..a95bccb8497d8f923d5641e9c085e460605d261d 100644 (file)
@@ -22,8 +22,8 @@
 #include <linux/raid/pq.h>
 
 /* Recover two failed data blocks. */
-void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
-                      void **ptrs)
+static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
+               int failb, void **ptrs)
 {
        u8 *p, *q, *dp, *dq;
        u8 px, qx, db;
@@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
 }
 
 /* Recover failure of one data block plus the P block */
-void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
+               void **ptrs)
 {
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
index 37ae61930559c0b4c77ce1080d1cb56d6dcdac95..ecb710c0b4d92110558b810045172e7046c20ec0 100644 (file)
@@ -19,8 +19,8 @@ static int raid6_has_ssse3(void)
                boot_cpu_has(X86_FEATURE_SSSE3);
 }
 
-void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
-                      void **ptrs)
+static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
+               int failb, void **ptrs)
 {
        u8 *p, *q, *dp, *dq;
        const u8 *pbmul;        /* P multiplier table for B data */
@@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
 }
 
 
-void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
+               void **ptrs)
 {
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
index d0ec4f3d1593031b5498dcc120822b41c423a3c5..e91fbc23fff121915217a8e5c53c29a2b52aeba4 100644 (file)
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
-                       spin_dump(lock, "lockup");
+                       spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
 #endif
index 5391299c1e7852dcc820a9e14ef88720fa949044..c3f36d415bdf43034415c801b8e3f922f5e5b928 100644 (file)
@@ -112,106 +112,199 @@ int skip_atoi(const char **s)
 /* Decimal conversion is by far the most typical, and is used
  * for /proc and /sys data. This directly impacts e.g. top performance
  * with many processes running. We optimize it for speed
- * using code from
- * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
- * (with permission from the author, Douglas W. Jones). */
+ * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+ * (with permission from the author, Douglas W. Jones).
+ */
 
-/* Formats correctly any integer in [0,99999].
- * Outputs from one to five digits depending on input.
- * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+/* Formats correctly any integer in [0, 999999999] */
 static noinline_for_stack
-char *put_dec_trunc(char *buf, unsigned q)
+char *put_dec_full9(char *buf, unsigned q)
 {
-       unsigned d3, d2, d1, d0;
-       d1 = (q>>4) & 0xf;
-       d2 = (q>>8) & 0xf;
-       d3 = (q>>12);
-
-       d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-       q = (d0 * 0xcd) >> 11;
-       d0 = d0 - 10*q;
-       *buf++ = d0 + '0'; /* least significant digit */
-       d1 = q + 9*d3 + 5*d2 + d1;
-       if (d1 != 0) {
-               q = (d1 * 0xcd) >> 11;
-               d1 = d1 - 10*q;
-               *buf++ = d1 + '0'; /* next digit */
-
-               d2 = q + 2*d2;
-               if ((d2 != 0) || (d3 != 0)) {
-                       q = (d2 * 0xd) >> 7;
-                       d2 = d2 - 10*q;
-                       *buf++ = d2 + '0'; /* next digit */
-
-                       d3 = q + 4*d3;
-                       if (d3 != 0) {
-                               q = (d3 * 0xcd) >> 11;
-                               d3 = d3 - 10*q;
-                               *buf++ = d3 + '0';  /* next digit */
-                               if (q != 0)
-                                       *buf++ = q + '0'; /* most sign. digit */
-                       }
-               }
-       }
+       unsigned r;
 
+       /*
+        * Possible ways to approx. divide by 10
+        * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
+        * (x * 0xcccd) >> 19     x <      81920 (x < 262149 when 64-bit mul)
+        * (x * 0x6667) >> 18     x <      43699
+        * (x * 0x3334) >> 17     x <      16389
+        * (x * 0x199a) >> 16     x <      16389
+        * (x * 0x0ccd) >> 15     x <      16389
+        * (x * 0x0667) >> 14     x <       2739
+        * (x * 0x0334) >> 13     x <       1029
+        * (x * 0x019a) >> 12     x <       1029
+        * (x * 0x00cd) >> 11     x <       1029 shorter code than * 0x67 (on i386)
+        * (x * 0x0067) >> 10     x <        179
+        * (x * 0x0034) >>  9     x <         69 same
+        * (x * 0x001a) >>  8     x <         69 same
+        * (x * 0x000d) >>  7     x <         69 same, shortest code (on i386)
+        * (x * 0x0007) >>  6     x <         19
+        * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+        */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 1 */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 2 */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 3 */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 4 */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 5 */
+       /* Now value is under 10000, can avoid 64-bit multiply */
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0'; /* 6 */
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0'; /* 7 */
+       q      = (r * 0xcd) >> 11;
+       *buf++ = (r - 10 * q) + '0'; /* 8 */
+       *buf++ = q + '0'; /* 9 */
        return buf;
 }
-/* Same with if's removed. Always emits five digits */
+#endif
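put_dec_full9() never divides: each step multiplies by a fixed-point reciprocal of 10 and shifts, using the constants and validity ranges listed in the table above. A quick user-space sanity check of the two constants it relies on (0x1999999a with a 64-bit multiply, then 0x199a once the value drops below 16389) could look like the following; it is only an illustration, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t x;

            /* 64-bit multiply variant; the table says it holds for x < 1073741829 */
            for (x = 0; x < 1000000000; x += 999983)        /* sample the 9-digit range */
                    assert(((x * (uint64_t)0x1999999a) >> 32) == x / 10);

            /* 16-bit variant; the table says it holds for x < 16389 */
            for (x = 0; x < 16389; x++)
                    assert(((x * 0x199a) >> 16) == x / 10);

            return 0;
    }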
+
+/* Similar to above but do not pad with zeros.
+ * Code can be easily arranged to print 9 digits too, but our callers
+ * always call put_dec_full9() instead when the number has 9 decimal digits.
+ */
 static noinline_for_stack
-char *put_dec_full(char *buf, unsigned q)
+char *put_dec_trunc8(char *buf, unsigned r)
 {
-       /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
-       /* but anyway, gcc produces better code with full-sized ints */
-       unsigned d3, d2, d1, d0;
-       d1 = (q>>4) & 0xf;
-       d2 = (q>>8) & 0xf;
-       d3 = (q>>12);
+       unsigned q;
+
+       /* Copy of previous function's body with added early returns */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 2 */
+       if (q == 0)
+               return buf;
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 3 */
+       if (r == 0)
+               return buf;
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 4 */
+       if (q == 0)
+               return buf;
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 5 */
+       if (r == 0)
+               return buf;
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0'; /* 6 */
+       if (q == 0)
+               return buf;
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0'; /* 7 */
+       if (r == 0)
+               return buf;
+       q      = (r * 0xcd) >> 11;
+       *buf++ = (r - 10 * q) + '0'; /* 8 */
+       if (q == 0)
+               return buf;
+       *buf++ = q + '0'; /* 9 */
+       return buf;
+}
 
-       /*
-        * Possible ways to approx. divide by 10
-        * gcc -O2 replaces multiply with shifts and adds
-        * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
-        * (x * 0x67) >> 10:  1100111
-        * (x * 0x34) >> 9:    110100 - same
-        * (x * 0x1a) >> 8:     11010 - same
-        * (x * 0x0d) >> 7:      1101 - same, shortest code (on i386)
-        */
-       d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-       q = (d0 * 0xcd) >> 11;
-       d0 = d0 - 10*q;
-       *buf++ = d0 + '0';
-       d1 = q + 9*d3 + 5*d2 + d1;
-               q = (d1 * 0xcd) >> 11;
-               d1 = d1 - 10*q;
-               *buf++ = d1 + '0';
-
-               d2 = q + 2*d2;
-                       q = (d2 * 0xd) >> 7;
-                       d2 = d2 - 10*q;
-                       *buf++ = d2 + '0';
-
-                       d3 = q + 4*d3;
-                               q = (d3 * 0xcd) >> 11; /* - shorter code */
-                               /* q = (d3 * 0x67) >> 10; - would also work */
-                               d3 = d3 - 10*q;
-                               *buf++ = d3 + '0';
-                                       *buf++ = q + '0';
+/* There are two algorithms to print larger numbers.
+ * One is generic: divide by 1000000000 and repeatedly print
+ * groups of (up to) 9 digits. It's conceptually simple,
+ * but requires a (unsigned long long) / 1000000000 division.
+ *
+ * The second algorithm splits the 64-bit unsigned long long into 16-bit chunks,
+ * manipulates them cleverly and generates groups of 4 decimal digits.
+ * It so happens that it does NOT require long long division.
+ *
+ * If long is > 32 bits, division of 64-bit values is relatively easy,
+ * and we will use the first algorithm.
+ * If long long is > 64 bits (strange architecture with VERY large long long),
+ * the second algorithm can't be used, and we again use the first one.
+ *
+ * Else (if long is 32 bits and long long is 64 bits) we use the second one.
+ */
 
-       return buf;
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+
+/* First algorithm: generic */
+
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+       if (n >= 100*1000*1000) {
+               while (n >= 1000*1000*1000)
+                       buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
+               if (n >= 100*1000*1000)
+                       return put_dec_full9(buf, n);
+       }
+       return put_dec_trunc8(buf, n);
 }
-/* No inlining helps gcc to use registers better */
+
+#else
+
+/* Second algorithm: valid only for 64-bit long longs */
+
 static noinline_for_stack
-char *put_dec(char *buf, unsigned long long num)
+char *put_dec_full4(char *buf, unsigned q)
 {
-       while (1) {
-               unsigned rem;
-               if (num < 100000)
-                       return put_dec_trunc(buf, num);
-               rem = do_div(num, 100000);
-               buf = put_dec_full(buf, rem);
-       }
+       unsigned r;
+       r      = (q * 0xcccd) >> 19;
+       *buf++ = (q - 10 * r) + '0';
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0';
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0';
+       *buf++ = r + '0';
+       return buf;
 }
 
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+       uint32_t d3, d2, d1, q, h;
+
+       if (n < 100*1000*1000)
+               return put_dec_trunc8(buf, n);
+
+       d1  = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
+       h   = (n >> 32);
+       d2  = (h      ) & 0xffff;
+       d3  = (h >> 16); /* implicit "& 0xffff" */
+
+       q   = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+
+       buf = put_dec_full4(buf, q % 10000);
+       q   = q / 10000;
+
+       d1  = q + 7671 * d3 + 9496 * d2 + 6 * d1;
+       buf = put_dec_full4(buf, d1 % 10000);
+       q   = d1 / 10000;
+
+       d2  = q + 4749 * d3 + 42 * d2;
+       buf = put_dec_full4(buf, d2 % 10000);
+       q   = d2 / 10000;
+
+       d3  = q + 281 * d3;
+       if (!d3)
+               goto done;
+       buf = put_dec_full4(buf, d3 % 10000);
+       q   = d3 / 10000;
+       if (!q)
+               goto done;
+       buf = put_dec_full4(buf, q);
+ done:
+       while (buf[-1] == '0')
+               --buf;
+
+       return buf;
+}
+
+#endif
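The constants in the second put_dec() fall out of writing n = d3*2^48 + d2*2^32 + d1*2^16 + d0 and expressing each power of two in base 10000:

    2^16 =           6 * 10^4 + 5536
    2^32 =      429496 * 10^4 + 7296,   where 429496      =  42 * 10^4 + 9496
    2^48 = 28147497671 * 10^4 +  656,   where 28147497671 = 281 * 10^8 + 4749 * 10^4 + 7671

so the lowest group of four digits is (656*d3 + 7296*d2 + 5536*d1 + d0) mod 10^4, its carry plus 6*d1 + 9496*d2 + 7671*d3 forms the next group, then 42*d2 + 4749*d3, and finally 281*d3, which are exactly the coefficients used above.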
+
 /*
  * Convert passed number to decimal string.
  * Returns the length of string.  On buffer overflow, returns 0.
@@ -220,16 +313,22 @@ char *put_dec(char *buf, unsigned long long num)
  */
 int num_to_str(char *buf, int size, unsigned long long num)
 {
-       char tmp[21];           /* Enough for 2^64 in decimal */
+       char tmp[sizeof(num) * 3];
        int idx, len;
 
-       len = put_dec(tmp, num) - tmp;
+       /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
+       if (num <= 9) {
+               tmp[0] = '0' + num;
+               len = 1;
+       } else {
+               len = put_dec(tmp, num) - tmp;
+       }
 
        if (len > size)
                return 0;
        for (idx = 0; idx < len; ++idx)
                buf[idx] = tmp[len - idx - 1];
-       return  len;
+       return len;
 }
 
 #define ZEROPAD        1               /* pad with zero */
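In num_to_str(), tmp[sizeof(num) * 3] is a cheap upper bound on the digit count: each byte contributes at most three decimal digits (256 < 1000), so a 64-bit value needs at most 24 bytes of scratch even though 2^64-1 is only 20 digits long. A usage sketch, with the buffer name and size chosen purely for illustration:

    char out[20];
    int len = num_to_str(out, sizeof(out), 18446744073709551615ULL);
    /* len == 20; out holds the digits of 2^64-1 and, as the code above shows,
     * no terminating NUL is written -- the caller adds one if it needs a C string */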
@@ -314,8 +413,8 @@ char *number(char *buf, char *end, unsigned long long num,
 
        /* generate full string in tmp[], in reverse order */
        i = 0;
-       if (num == 0)
-               tmp[i++] = '0';
+       if (num < spec.base)
+               tmp[i++] = digits[num] | locase;
        /* Generic code, for any base:
        else do {
                tmp[i++] = (digits[do_div(num,base)] | locase);
@@ -611,7 +710,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
        }
        for (i = 0; i < 4; i++) {
                char temp[3];   /* hold each IP quad in reverse order */
-               int digits = put_dec_trunc(temp, addr[index]) - temp;
+               int digits = put_dec_trunc8(temp, addr[index]) - temp;
                if (leading_zeros) {
                        if (digits < 3)
                                *p++ = '0';
@@ -870,13 +969,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
              struct printf_spec spec)
 {
+       int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
+
        if (!ptr && *fmt != 'K') {
                /*
                 * Print (null) with the same width as a pointer so it makes
                 * tabular output look nice.
                 */
                if (spec.field_width == -1)
-                       spec.field_width = 2 * sizeof(void *);
+                       spec.field_width = default_width;
                return string(buf, end, "(null)", spec);
        }
 
@@ -931,7 +1032,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                 */
                if (in_irq() || in_serving_softirq() || in_nmi()) {
                        if (spec.field_width == -1)
-                               spec.field_width = 2 * sizeof(void *);
+                               spec.field_width = default_width;
                        return string(buf, end, "pK-error", spec);
                }
                if (!((kptr_restrict == 0) ||
@@ -948,7 +1049,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        }
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
-               spec.field_width = 2 * sizeof(void *);
+               spec.field_width = default_width;
                spec.flags |= ZEROPAD;
        }
        spec.base = 16;
index b2176374b98e5e678ec93acda28cc0891d1c3717..82fed4eb2b6fe39cfd0476afe46e9589e02b386f 100644 (file)
@@ -389,3 +389,20 @@ config CLEANCACHE
          in a negligible performance hit.
 
          If unsure, say Y to enable cleancache
+
+config FRONTSWAP
+       bool "Enable frontswap to cache swap pages if tmem is present"
+       depends on SWAP
+       default n
+       help
+         Frontswap is so named because it can be thought of as the opposite
+         of a "backing" store for a swap device.  The data is stored into
+         "transcendent memory", memory that is not directly accessible or
+         addressable by the kernel and is of unknown and possibly
+         time-varying size.  When space in transcendent memory is available,
+         a significant swap I/O reduction may be achieved.  When none is
+         available, all frontswap calls are reduced to a single pointer-
+         compare-against-NULL resulting in a negligible performance hit
+         and swap data is stored as normal on the matching swap device.
+
+         If unsure, say Y to enable frontswap.
index a156285ce88d9a19e529b54b8836efac559b7af7..2e2fbbefb99fa94c97be13aa8fa71da823455409 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_FRONTSWAP)        += frontswap.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
index ec4fcb7a56c8975492d656940906af6153136e51..bcb63ac48cc5e0d20eb7d1359b8d6d79a7358ac0 100644 (file)
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
@@ -710,6 +710,10 @@ again:
        if (ptr)
                return ptr;
 
+       /* do not panic in alloc_bootmem_bdata() */
+       if (limit && goal + size > limit)
+               limit = 0;
+
        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
index 5646c740f613ed1ec8b34a094f76d7934eed1aac..32e6f4136fa2297e13a6ac51444d50c18b78e9a3 100644 (file)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(__cleancache_init_shared_fs);
 static int cleancache_get_key(struct inode *inode,
                              struct cleancache_filekey *key)
 {
-       int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
+       int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
        int len = 0, maxlen = CLEANCACHE_KEY_MAX;
        struct super_block *sb = inode->i_sb;
 
@@ -88,9 +88,7 @@ static int cleancache_get_key(struct inode *inode,
        if (sb->s_export_op != NULL) {
                fhfn = sb->s_export_op->encode_fh;
                if  (fhfn) {
-                       struct dentry d;
-                       d.d_inode = inode;
-                       len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
+                       len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
                        if (len <= 0 || len == 255)
                                return -1;
                        if (maxlen > CLEANCACHE_KEY_MAX)
index 4ac338af512099aae2dbb35c64bc96e822e8c162..2f42d952853970b5dd1f95a149d8750e7d570357 100644 (file)
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (cc->mode != COMPACT_SYNC &&
-                   last_pageblock_nr != pageblock_nr &&
+               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
                lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-       unsigned long pfn, start_pfn, end_pfn;
-       struct page *start_page, *end_page;
-
-       pfn = page_to_pfn(page);
-       start_pfn = pfn & ~(pageblock_nr_pages - 1);
-       end_pfn = start_pfn + pageblock_nr_pages;
-
-       start_page = pfn_to_page(start_pfn);
-       end_page = pfn_to_page(end_pfn);
-
-       /* Do not deal with pageblocks that overlap zones */
-       if (page_zone(start_page) != page_zone(end_page))
-               return false;
-
-       for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-                                                                 page++) {
-               if (!pfn_valid_within(pfn))
-                       continue;
-
-               if (PageBuddy(page)) {
-                       int order = page_order(page);
-
-                       pfn += (1 << order) - 1;
-                       page += (1 << order) - 1;
-
-                       continue;
-               } else if (page_count(page) == 0 || PageLRU(page))
-                       continue;
-
-               return false;
-       }
-
-       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-       move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-       return true;
-}
-
-enum smt_result {
-       GOOD_AS_MIGRATION_TARGET,
-       FAIL_UNMOVABLE_TARGET,
-       FAIL_BAD_TARGET,
-};
 
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-                                     struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
 
        int migratetype = get_pageblock_migratetype(page);
 
        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return FAIL_BAD_TARGET;
+               return false;
 
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return GOOD_AS_MIGRATION_TARGET;
+               return true;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-           migrate_async_suitable(migratetype))
-               return GOOD_AS_MIGRATION_TARGET;
-
-       if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE)
-               return FAIL_UNMOVABLE_TARGET;
-
-       if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE &&
-           rescue_unmovable_pageblock(page))
-               return GOOD_AS_MIGRATION_TARGET;
+       if (migrate_async_suitable(migratetype))
+               return true;
 
        /* Otherwise skip the block */
-       return FAIL_BAD_TARGET;
+       return false;
 }
 
 /*
@@ -477,13 +413,6 @@ static void isolate_freepages(struct zone *zone,
 
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
-       /*
-        * isolate_freepages() may be called more than once during
-        * compact_zone_order() run and we want only the most recent
-        * count.
-        */
-       cc->nr_pageblocks_skipped = 0;
-
        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
-               enum smt_result ret;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Check the block is suitable for migration */
-               ret = suitable_migration_target(page, cc);
-               if (ret != GOOD_AS_MIGRATION_TARGET) {
-                       if (ret == FAIL_UNMOVABLE_TARGET)
-                               cc->nr_pageblocks_skipped++;
+               if (!suitable_migration_target(page))
                        continue;
-               }
+
                /*
                 * Found a block suitable for isolating free pages from. Now
                 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
-               ret = suitable_migration_target(page, cc);
-               if (ret == GOOD_AS_MIGRATION_TARGET) {
+               if (suitable_migration_target(page)) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
-               } else if (ret == FAIL_UNMOVABLE_TARGET)
-                       cc->nr_pageblocks_skipped++;
+               }
                spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                       (unsigned long)&cc->freepages, false,
-                       (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-                                                     : MIGRATE_ASYNC);
+                               (unsigned long)cc, false,
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -779,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
+                       if (err == -ENOMEM) {
+                               ret = COMPACT_PARTIAL;
+                               goto out;
+                       }
                }
-
        }
 
 out:
@@ -793,8 +718,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                enum compact_mode mode,
-                                unsigned long *nr_pageblocks_skipped)
+                                bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -802,17 +726,12 @@ static unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .mode = mode,
+               .sync = sync,
        };
-       unsigned long rc;
-
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
 
-       rc = compact_zone(zone, &cc);
-       *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-       return rc;
+       return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -837,8 +756,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
-       unsigned long nr_pageblocks_skipped;
-       enum compact_mode mode;
 
        /*
         * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +772,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-               status = compact_zone_order(zone, order, gfp_mask, mode,
-                                               &nr_pageblocks_skipped);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
-
-               if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-                       if (nr_pageblocks_skipped) {
-                               mode = COMPACT_ASYNC_UNMOVABLE;
-                               goto retry;
-                       }
-               }
        }
 
        return rc;
@@ -904,7 +811,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        if (ok && cc->order > zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->mode == COMPACT_SYNC)
+                       else if (!ok && cc->sync)
                                defer_compaction(zone, cc->order);
                }
 
@@ -919,7 +826,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .mode = COMPACT_ASYNC_MOVABLE,
+               .sync = false,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -929,7 +836,7 @@ static int compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
index 64b48f934b897451154e5517b38704fb19e4f86a..a4a5260b0279b77b37738540b1e8c24fb446a3e5 100644 (file)
@@ -1899,71 +1899,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-/*
- * The logic we want is
- *
- *     if suid or (sgid and xgrp)
- *             remove privs
- */
-int should_remove_suid(struct dentry *dentry)
-{
-       umode_t mode = dentry->d_inode->i_mode;
-       int kill = 0;
-
-       /* suid always must be killed */
-       if (unlikely(mode & S_ISUID))
-               kill = ATTR_KILL_SUID;
-
-       /*
-        * sgid without any exec bits is just a mandatory locking mark; leave
-        * it alone.  If some exec bits are set, it's a real sgid; kill it.
-        */
-       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-               kill |= ATTR_KILL_SGID;
-
-       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
-               return kill;
-
-       return 0;
-}
-EXPORT_SYMBOL(should_remove_suid);
-
-static int __remove_suid(struct dentry *dentry, int kill)
-{
-       struct iattr newattrs;
-
-       newattrs.ia_valid = ATTR_FORCE | kill;
-       return notify_change(dentry, &newattrs);
-}
-
-int file_remove_suid(struct file *file)
-{
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       int killsuid;
-       int killpriv;
-       int error = 0;
-
-       /* Fast path for nothing security related */
-       if (IS_NOSEC(inode))
-               return 0;
-
-       killsuid = should_remove_suid(dentry);
-       killpriv = security_inode_need_killpriv(dentry);
-
-       if (killpriv < 0)
-               return killpriv;
-       if (killpriv)
-               error = security_inode_killpriv(dentry);
-       if (!error && killsuid)
-               error = __remove_suid(dentry, killsuid);
-       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-               inode->i_flags |= S_NOSEC;
-
-       return error;
-}
-EXPORT_SYMBOL(file_remove_suid);
-
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -2489,7 +2424,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
 
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (unlikely(file->f_flags & O_DIRECT)) {
index a4eb3113222912c9aada14bd92c6b68d01577b73..213ca1f5340980e1ce6fad8d4f12e50858d61397 100644 (file)
@@ -426,7 +426,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        if (ret)
                goto out_backing;
 
-       file_update_time(filp);
+       ret = file_update_time(filp);
+       if (ret)
+               goto out_backing;
 
        ret = __xip_file_write (filp, buf, count, pos, ppos);
 
diff --git a/mm/frontswap.c b/mm/frontswap.c
new file mode 100644 (file)
index 0000000..e250255
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Frontswap frontend
+ *
+ * This code provides the generic "frontend" layer to call a matching
+ * "backend" driver implementation of frontswap.  See
+ * Documentation/vm/frontswap.txt for more information.
+ *
+ * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
+ * Author: Dan Magenheimer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
+
+/*
+ * frontswap_ops is set by frontswap_register_ops to contain the pointers
+ * to the frontswap "backend" implementation functions.
+ */
+static struct frontswap_ops frontswap_ops __read_mostly;
+
+/*
+ * This global enablement flag reduces overhead on systems where frontswap_ops
+ * has not been registered, so is preferred to the slower alternative: a
+ * function call that checks a non-global.
+ */
+bool frontswap_enabled __read_mostly;
+EXPORT_SYMBOL(frontswap_enabled);
+
+/*
+ * If enabled, frontswap_store will return failure even on success.  As
+ * a result, the swap subsystem will always write the page to swap, in
+ * effect converting frontswap into a writethrough cache.  In this mode,
+ * there is no direct reduction in swap writes, but a frontswap backend
+ * can unilaterally "reclaim" any pages in use with no data loss, thus
+ * providing increased control over maximum memory usage due to frontswap.
+ */
+static bool frontswap_writethrough_enabled __read_mostly;
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Counters available via /sys/kernel/debug/frontswap (if debugfs is
+ * properly configured).  These are for information only so are not protected
+ * against increment races.
+ */
+static u64 frontswap_loads;
+static u64 frontswap_succ_stores;
+static u64 frontswap_failed_stores;
+static u64 frontswap_invalidates;
+
+static inline void inc_frontswap_loads(void) {
+       frontswap_loads++;
+}
+static inline void inc_frontswap_succ_stores(void) {
+       frontswap_succ_stores++;
+}
+static inline void inc_frontswap_failed_stores(void) {
+       frontswap_failed_stores++;
+}
+static inline void inc_frontswap_invalidates(void) {
+       frontswap_invalidates++;
+}
+#else
+static inline void inc_frontswap_loads(void) { }
+static inline void inc_frontswap_succ_stores(void) { }
+static inline void inc_frontswap_failed_stores(void) { }
+static inline void inc_frontswap_invalidates(void) { }
+#endif
+/*
+ * Register operations for frontswap, returning previous thus allowing
+ * detection of multiple backends and possible nesting.
+ */
+struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+{
+       struct frontswap_ops old = frontswap_ops;
+
+       frontswap_ops = *ops;
+       frontswap_enabled = true;
+       return old;
+}
+EXPORT_SYMBOL(frontswap_register_ops);
+
+/*
+ * Enable/disable frontswap writethrough (see above).
+ */
+void frontswap_writethrough(bool enable)
+{
+       frontswap_writethrough_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_writethrough);
+
+/*
+ * Called when a swap device is swapon'd.
+ */
+void __frontswap_init(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       if (frontswap_enabled)
+               (*frontswap_ops.init)(type);
+}
+EXPORT_SYMBOL(__frontswap_init);
+
+/*
+ * "Store" data from a page to frontswap and associate it with the page's
+ * swaptype and offset.  Page must be locked and in the swap cache.
+ * If frontswap already contains a page with matching swaptype and
+ * offset, the frontswap implementation may either overwrite the data and
+ * return success or invalidate the page from frontswap and return failure.
+ */
+int __frontswap_store(struct page *page)
+{
+       int ret = -1, dup = 0;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               dup = 1;
+       ret = (*frontswap_ops.store)(type, offset, page);
+       if (ret == 0) {
+               frontswap_set(sis, offset);
+               inc_frontswap_succ_stores();
+               if (!dup)
+                       atomic_inc(&sis->frontswap_pages);
+       } else if (dup) {
+               /*
+                * A failed dup always results in automatic invalidation of
+                * the (older) page from frontswap.
+                */
+               frontswap_clear(sis, offset);
+               atomic_dec(&sis->frontswap_pages);
+               inc_frontswap_failed_stores();
+       } else
+               inc_frontswap_failed_stores();
+       if (frontswap_writethrough_enabled)
+               /* report failure so swap also writes to swap device */
+               ret = -1;
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_store);
+
+/*
+ * "Get" data from frontswap associated with swaptype and offset that were
+ * specified when the data was put to frontswap and use it to fill the
+ * specified page with data. Page must be locked and in the swap cache.
+ */
+int __frontswap_load(struct page *page)
+{
+       int ret = -1;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               ret = (*frontswap_ops.load)(type, offset, page);
+       if (ret == 0)
+               inc_frontswap_loads();
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_load);
+
+/*
+ * Invalidate any data from frontswap associated with the specified swaptype
+ * and offset so that a subsequent "get" will fail.
+ */
+void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset)) {
+               (*frontswap_ops.invalidate_page)(type, offset);
+               atomic_dec(&sis->frontswap_pages);
+               frontswap_clear(sis, offset);
+               inc_frontswap_invalidates();
+       }
+}
+EXPORT_SYMBOL(__frontswap_invalidate_page);
+
+/*
+ * Invalidate all data from frontswap associated with all offsets for the
+ * specified swaptype.
+ */
+void __frontswap_invalidate_area(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       (*frontswap_ops.invalidate_area)(type);
+       atomic_set(&sis->frontswap_pages, 0);
+       memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+}
+EXPORT_SYMBOL(__frontswap_invalidate_area);
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff" and works by calling try_to_unuse to attempt to
+ * unuse enough frontswap pages to attempt to -- subject to memory
+ * constraints -- reduce the number of pages in frontswap to the
+ * number given in the parameter target_pages.
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+       struct swap_info_struct *si = NULL;
+       int si_frontswap_pages;
+       unsigned long total_pages = 0, total_pages_to_unuse;
+       unsigned long pages = 0, pages_to_unuse = 0;
+       int type;
+       bool locked = false;
+
+       /*
+        * we don't want to hold swap_lock while doing a very
+        * lengthy try_to_unuse, but swap_list may change
+        * so restart scan from swap_list.head each time
+        */
+       spin_lock(&swap_lock);
+       locked = true;
+       total_pages = 0;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               total_pages += atomic_read(&si->frontswap_pages);
+       }
+       if (total_pages <= target_pages)
+               goto out;
+       total_pages_to_unuse = total_pages - target_pages;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               si_frontswap_pages = atomic_read(&si->frontswap_pages);
+               if (total_pages_to_unuse < si_frontswap_pages)
+                       pages = pages_to_unuse = total_pages_to_unuse;
+               else {
+                       pages = si_frontswap_pages;
+                       pages_to_unuse = 0; /* unuse all */
+               }
+               /* ensure there is enough RAM to fetch pages from frontswap */
+               if (security_vm_enough_memory_mm(current->mm, pages))
+                       continue;
+               vm_unacct_memory(pages);
+               break;
+       }
+       if (type < 0)
+               goto out;
+       locked = false;
+       spin_unlock(&swap_lock);
+       try_to_unuse(type, true, pages_to_unuse);
+out:
+       if (locked)
+               spin_unlock(&swap_lock);
+       return;
+}
+EXPORT_SYMBOL(frontswap_shrink);
+
+/*
+ * Count and return the number of frontswap pages across all
+ * swap devices.  This is exported so that backend drivers can
+ * determine current usage without reading debugfs.
+ */
+unsigned long frontswap_curr_pages(void)
+{
+       int type;
+       unsigned long totalpages = 0;
+       struct swap_info_struct *si = NULL;
+
+       spin_lock(&swap_lock);
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               totalpages += atomic_read(&si->frontswap_pages);
+       }
+       spin_unlock(&swap_lock);
+       return totalpages;
+}
+EXPORT_SYMBOL(frontswap_curr_pages);
+
+static int __init init_frontswap(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *root = debugfs_create_dir("frontswap", NULL);
+       if (root == NULL)
+               return -ENXIO;
+       debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
+       debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
+       debugfs_create_u64("failed_stores", S_IRUGO, root,
+                               &frontswap_failed_stores);
+       debugfs_create_u64("invalidates", S_IRUGO,
+                               root, &frontswap_invalidates);
+#endif
+       return 0;
+}
+
+module_init(init_frontswap);
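Backends plug into this frontend by filling a struct frontswap_ops and handing it to frontswap_register_ops(). The struct definition lives in linux/frontswap.h, which is not part of this hunk, so the sketch below only mirrors the callbacks actually invoked above; all names and the exact signatures are assumptions for illustration, not a definitive backend:

    #include <linux/module.h>
    #include <linux/frontswap.h>

    /* Callback shapes inferred from the frontend calls above. */
    static void ex_init(unsigned type) { /* per-swap-device setup */ }
    static int ex_store(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* 0 = stored; anything else means the page goes to real swap */
    }
    static int ex_load(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* fill @page and return 0 on a hit */
    }
    static void ex_invalidate_page(unsigned type, pgoff_t offset) { }
    static void ex_invalidate_area(unsigned type) { }

    static struct frontswap_ops ex_ops = {
            .init            = ex_init,
            .store           = ex_store,
            .load            = ex_load,
            .invalidate_page = ex_invalidate_page,
            .invalidate_area = ex_invalidate_area,
    };

    static struct frontswap_ops ex_old_ops; /* previous ops, returned for nesting detection */

    static int __init ex_register(void)
    {
            ex_old_ops = frontswap_register_ops(&ex_ops);
            return 0;
    }
    module_init(ex_register);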
index 4194ab9dc19b412aa8e15f1b89612aa2595f0c9f..2ba87fbfb75b9755e279d39af93359693afe66fd 100644 (file)
@@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
-extern void set_pageblock_migratetype(struct page *page, int migratetype);
-extern int move_freepages_block(struct zone *zone, struct page *page,
-                               int migratetype);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
@@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page);
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-#include <linux/compaction.h>
 
 /*
  * in mm/compaction.c
@@ -123,14 +119,11 @@ struct compact_control {
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       enum compact_mode mode;         /* Compaction mode */
+       bool sync;                      /* Synchronous migration */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
-
-       /* Number of UNMOVABLE destination pageblocks skipped during scan */
-       unsigned long nr_pageblocks_skipped;
 };
 
 unsigned long
@@ -350,3 +343,7 @@ extern u64 hwpoison_filter_flags_mask;
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
+extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
+        unsigned long, unsigned long,
+        unsigned long, unsigned long);
index deff1b64a08c36ef4857590e9913717488953014..14d260fa0d17939a2279c244df91789cd30720e4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/ksm.h>
 #include <linux/fs.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -204,14 +205,16 @@ static long madvise_remove(struct vm_area_struct *vma,
 {
        loff_t offset;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -221,11 +224,18 @@ static long madvise_remove(struct vm_area_struct *vma,
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* filesystem's fallocate may need to take i_mutex */
+       /*
+        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
-       error = do_fallocate(vma->vm_file,
+       error = do_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
index 952123eba43371a5e6d26ff8a5b7ad934747c027..5cc6731b00ccd05ff2f5b610627a0a2848dc5544 100644 (file)
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                           MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_free(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_reserve(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
        type->total_size -= type->regions[r].size;
@@ -184,9 +160,39 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
        }
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+                                       phys_addr_t *addr)
+{
+       if (memblock.reserved.regions == memblock_reserved_init_regions)
+               return 0;
+
+       *addr = __pa(memblock.reserved.regions);
+
+       return PAGE_ALIGN(sizeof(struct memblock_region) *
+                         memblock.reserved.max);
+}
+
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+                                               phys_addr_t new_area_start,
+                                               phys_addr_t new_area_size)
 {
        struct memblock_region *new_array, *old_array;
+       phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;
@@ -200,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
+       /*
+        * We need to allocate the new array aligned to PAGE_SIZE,
+        * so we can free it completely later.
+        */
+       old_alloc_size = PAGE_ALIGN(old_size);
+       new_alloc_size = PAGE_ALIGN(new_size);
 
        /* Retrieve the slab flag */
        if (type == &memblock.memory)
@@ -222,7 +234,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
-               addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+               /* only exclude range when trying to double reserved.regions */
+               if (type != &memblock.reserved)
+                       new_area_start = new_area_size = 0;
+
+               addr = memblock_find_in_range(new_area_start + new_area_size,
+                                               memblock.current_limit,
+                                               new_alloc_size, PAGE_SIZE);
+               if (!addr && new_area_size)
+                       addr = memblock_find_in_range(0,
+                                       min(new_area_start, memblock.current_limit),
+                                       new_alloc_size, PAGE_SIZE);
+
                new_array = addr ? __va(addr) : 0;
        }
        if (!addr) {
@@ -251,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_size);
+               memblock_free(__pa(old_array), old_alloc_size);
 
        /* Reserve the new array if that comes from the memblock.
         * Otherwise, we needn't do it
         */
        if (!use_slab)
-               BUG_ON(memblock_reserve(addr, new_size));
+               BUG_ON(memblock_reserve(addr, new_alloc_size));
 
        /* Update slab flag */
        *in_slab = use_slab;
@@ -399,7 +422,7 @@ repeat:
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
-                       if (memblock_double_array(type) < 0)
+                       if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
@@ -450,7 +473,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
-               if (memblock_double_array(type) < 0)
+               if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;
 
        for (i = 0; i < type->cnt; i++) {
@@ -540,9 +563,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration.  The lower 32bit of
@@ -616,9 +639,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
@@ -867,6 +890,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
        return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
        int idx = memblock_search(&memblock.memory, base);
@@ -879,6 +912,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
                 memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
        memblock_cap_size(base, &size);
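The two kernel-doc blocks added above make the asymmetry explicit: memblock_is_region_memory() requires the whole region to sit inside a single memory block (a subset test), while memblock_is_region_reserved() reports any overlap with a reserved block (an intersection test). A standalone sketch of the two interval tests on half-open ranges [base, base+size), purely for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool contains(unsigned long blk_base, unsigned long blk_size,
		     unsigned long base, unsigned long size)
{
	return base >= blk_base && base + size <= blk_base + blk_size;
}

static bool intersects(unsigned long blk_base, unsigned long blk_size,
		       unsigned long base, unsigned long size)
{
	return base < blk_base + blk_size && blk_base < base + size;
}

int main(void)
{
	/* region 0x2000..0x3000 inside block 0x1000..0x5000: subset and overlap */
	printf("%d %d\n", contains(0x1000, 0x4000, 0x2000, 0x1000),
	       intersects(0x1000, 0x4000, 0x2000, 0x1000));
	/* region 0x4800..0x5800 straddles the end of the block: overlap only */
	printf("%d %d\n", contains(0x1000, 0x4000, 0x4800, 0x1000),
	       intersects(0x1000, 0x4000, 0x4800, 0x1000));
	return 0;
}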
index ac35bccadb7b9f53606d445a961e442e891aa94a..f72b5e52451a7d8e62648dbfe211e5ff11c7c34f 100644 (file)
@@ -1148,7 +1148,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 {
        if (root_memcg == memcg)
                return true;
-       if (!root_memcg->use_hierarchy)
+       if (!root_memcg->use_hierarchy || !memcg)
                return false;
        return css_is_ancestor(&memcg->css, &root_memcg->css);
 }
@@ -1234,7 +1234,7 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
- * @mem: the memory cgroup
+ * @memcg: the memory cgroup
  *
  * Returns the maximum amount of memory @mem can be charged with, in
  * pages.
@@ -1508,7 +1508,7 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
 
 /**
  * test_mem_cgroup_node_reclaimable
- * @mem: the target memcg
+ * @memcg: the target memcg
  * @nid: the node ID to be checked.
  * @noswap : specify true here if the user wants file only information.
  *
index 1b7dc662bf9f229063cb3e7b97e8e4c22147b92b..2466d1250231f3e2405429ea4de4a97c597d0074 100644 (file)
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-                               VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+#ifdef CONFIG_DEBUG_VM
+                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
+                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
+                                               __func__, addr, end,
+                                               vma->vm_start,
+                                               vma->vm_end);
+                                       BUG();
+                               }
+#endif
                                split_huge_page_pmd(vma->vm_mm, pmd);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
@@ -1366,7 +1374,7 @@ void unmap_vmas(struct mmu_gather *tlb,
 /**
  * zap_page_range - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * @start: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  *
index 0d7e3ec8e0f3cc997b5fa0b422f5fa39caffe413..427bb291dd0fdeeb6db93c86c66df6a6f92c9b28 100644 (file)
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
-                       goto out;
+                       goto error;
                new_pgdat = 1;
        }
 
index f15c1b24ca1822c7808b1ea825ca932a9e063b23..1d771e4200d222eea46458bdd6089279a78ffeb5 100644 (file)
@@ -1177,7 +1177,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                if (!list_empty(&pagelist)) {
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
-                                               false, true);
+                                               false, MIGRATE_SYNC);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }
index d9049811f3521bc690ff1535831b87dc3b682fe4..54990476c049b2fa60c5e740a0533ea70df1f856 100644 (file)
@@ -63,19 +63,21 @@ EXPORT_SYMBOL(mempool_destroy);
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                                mempool_free_t *free_fn, void *pool_data)
 {
-       return  mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
+       return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
+                                  GFP_KERNEL, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(mempool_create);
 
 mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-                       mempool_free_t *free_fn, void *pool_data, int node_id)
+                              mempool_free_t *free_fn, void *pool_data,
+                              gfp_t gfp_mask, int node_id)
 {
        mempool_t *pool;
-       pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
+       pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
        if (!pool)
                return NULL;
        pool->elements = kmalloc_node(min_nr * sizeof(void *),
-                                       GFP_KERNEL, node_id);
+                                     gfp_mask, node_id);
        if (!pool->elements) {
                kfree(pool);
                return NULL;
@@ -93,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
        while (pool->curr_nr < pool->min_nr) {
                void *element;
 
-               element = pool->alloc(GFP_KERNEL, pool->pool_data);
+               element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_destroy(pool);
                        return NULL;
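With the hunks above, mempool_create_node() takes an explicit gfp_mask that is used for the pool structure, the element pointer array and the initial elements, instead of hard-coding GFP_KERNEL; mempool_create() simply passes GFP_KERNEL and NUMA_NO_NODE. A hedged usage sketch (the "my_cache" slab cache is hypothetical; mempool_alloc_slab/mempool_free_slab are the stock slab helpers):

	mempool_t *pool;

	/* 16 pre-allocated elements backed by an existing kmem_cache;
	 * sleeping allocation, no NUMA node preference. */
	pool = mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
				   my_cache, GFP_KERNEL, NUMA_NO_NODE);
	if (!pool)
		return -ENOMEM;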
index ab81d482ae6f1cac508fce796c8902432e32c3ad..be26d5cbe56b34d63f8c8ac8b799782f9ef6424b 100644 (file)
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
-               __set_page_dirty_nobuffers(newpage);
+               if (PageSwapBacked(page))
+                       SetPageDirty(newpage);
+               else
+                       __set_page_dirty_nobuffers(newpage);
        }
 
        mlock_migrate_page(newpage, page);
index 4a9c2a391e28efe523e1a3aaaf9ab06d24627e86..3edfcdfa42d9f27a5238780065220ec3b4fc702a 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
-       int error;
-       unsigned long reqprot = prot;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1101,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-       if (error)
-               return error;
-
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flag, offset);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -1165,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
@@ -1629,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return arch_rebalance_pgtables(addr, len);
+       addr = arch_rebalance_pgtables(addr, len);
+       error = security_mmap_addr(addr);
+       return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1819,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       error = security_mmap_addr(address);
        if (error)
                return error;
 
@@ -2159,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
        return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2207,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-       if (error)
-               return error;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2563,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (ret)
-               goto out;
-
        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;
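Taken together, the mm/mmap.c hunks above replace the scattered security_file_mmap(NULL, 0, 0, 0, addr, 1) address checks with a single security_mmap_addr() call applied inside get_unmapped_area(), and drop the file-based check from do_mmap_pgoff() (it moves into the new vm_mmap_pgoff() helper, see mm/util.c further down). Illustrative caller-side effect only, not new API beyond what the hunks introduce:

	/* Callers such as do_brk() now rely on get_unmapped_area() to
	 * reject a disallowed address; any error, including one from
	 * security_mmap_addr(), comes back encoded in the return value. */
	addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (addr & ~PAGE_MASK)
		return addr;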
index db8d983b5a7d7a2d6746ccbf74471d2ced3cdd96..21fed202ddad865bb3ee70d07d8ebce17fd37493 100644 (file)
@@ -371,10 +371,6 @@ static unsigned long mremap_to(unsigned long addr,
        if ((addr <= new_addr) && (addr+old_len) > new_addr)
                goto out;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-       if (ret)
-               goto out;
-
        ret = do_munmap(mm, new_addr, new_len);
        if (ret)
                goto out;
@@ -432,15 +428,17 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
  * This option implies MREMAP_MAYMOVE.
  */
-unsigned long do_mremap(unsigned long addr,
-       unsigned long old_len, unsigned long new_len,
-       unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+               unsigned long, new_len, unsigned long, flags,
+               unsigned long, new_addr)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
 
+       down_write(&current->mm->mmap_sem);
+
        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;
 
@@ -530,25 +528,11 @@ unsigned long do_mremap(unsigned long addr,
                        goto out;
                }
 
-               ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-               if (ret)
-                       goto out;
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
 out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
-       return ret;
-}
-
-SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
-               unsigned long, new_len, unsigned long, flags,
-               unsigned long, new_addr)
-{
-       unsigned long ret;
-
-       down_write(&current->mm->mmap_sem);
-       ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
 }
index d23415c001bc4c5847986c7659261ca335888382..405573010f99a8b0d877dd980e284979ebd2ec69 100644 (file)
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                __free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+                                phys_addr_t end)
+{
+       unsigned long start_pfn = PFN_UP(start);
+       unsigned long end_pfn = min_t(unsigned long,
+                                     PFN_DOWN(end), max_low_pfn);
+
+       if (start_pfn > end_pfn)
+               return 0;
+
+       __free_pages_memory(start_pfn, end_pfn);
+
+       return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
        unsigned long count = 0;
-       phys_addr_t start, end;
+       phys_addr_t start, end, size;
        u64 i;
 
-       /* free reserved array temporarily so that it's treated as free area */
-       memblock_free_reserved_regions();
-
-       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-               unsigned long start_pfn = PFN_UP(start);
-               unsigned long end_pfn = min_t(unsigned long,
-                                             PFN_DOWN(end), max_low_pfn);
-               if (start_pfn < end_pfn) {
-                       __free_pages_memory(start_pfn, end_pfn);
-                       count += end_pfn - start_pfn;
-               }
-       }
+       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+               count += __free_memory_core(start, end);
+
+       /* free range that is used for reserved array if we allocate it */
+       size = get_allocated_memblock_reserved_regions_info(&start);
+       if (size)
+               count += __free_memory_core(start, start + size);
 
-       /* put region array back? */
-       memblock_reserve_reserved_regions();
        return count;
 }
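free_low_memory_core_early() above is reworked so the PFN rounding and clamping live in one helper, __free_memory_core(), which is then reused to give back the pages that temporarily held the reserved-region array (instead of the old free/re-reserve dance). A standalone sketch of just the rounding and clamping arithmetic, with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x100234, end = 0x108000, max_low_pfn = 0x200;
	unsigned long start_pfn = PFN_UP(start);        /* round partial page up   */
	unsigned long end_pfn   = PFN_DOWN(end);        /* round partial page down */

	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;                  /* never free highmem here */

	/* an inverted or empty range contributes nothing */
	printf("%lu pages freed\n", start_pfn > end_pfn ? 0UL : end_pfn - start_pfn);
	return 0;
}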
 
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
index bb8f4f004a82ce57abb0653a9a8ed72d533f5c45..d4b0c10872de59d8959262b1daac92d5d60eb80a 100644 (file)
@@ -889,7 +889,6 @@ static int validate_mmap_request(struct file *file,
                                 unsigned long *_capabilities)
 {
        unsigned long capabilities, rlen;
-       unsigned long reqprot = prot;
        int ret;
 
        /* do the simple checks first */
@@ -1047,7 +1046,7 @@ static int validate_mmap_request(struct file *file,
        }
 
        /* allow the security API to have its say */
-       ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
+       ret = security_mmap_addr(addr);
        if (ret < 0)
                return ret;
 
@@ -1233,7 +1232,7 @@ enomem:
 /*
  * handle mapping creation for uClinux
  */
-static unsigned long do_mmap_pgoff(struct file *file,
+unsigned long do_mmap_pgoff(struct file *file,
                            unsigned long addr,
                            unsigned long len,
                            unsigned long prot,
@@ -1471,32 +1470,6 @@ error_getting_region:
        return -ENOMEM;
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flag, offset);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -1513,9 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
        if (file)
                fput(file);
index ed0e19677360fa55f62e3944208e82cab7eeacc8..ac300c99baf644824f9c6532627a99fdb26020b1 100644 (file)
@@ -183,7 +183,8 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
 {
-       unsigned long points;
+       long points;
+       long adj;
 
        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;
@@ -192,7 +193,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
        if (!p)
                return 0;
 
-       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+       adj = p->signal->oom_score_adj;
+       if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }
@@ -210,20 +212,17 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-               points -= 30 * totalpages / 1000;
+               adj -= 30;
 
-       /*
-        * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
-        * either completely disable oom killing or always prefer a certain
-        * task.
-        */
-       points += p->signal->oom_score_adj * totalpages / 1000;
+       /* Normalize to oom_score_adj units */
+       adj *= totalpages / 1000;
+       points += adj;
 
        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
-       return points ? points : 1;
+       return points > 0 ? points : 1;
 }
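The oom_badness() hunks above keep the oom_score_adj contribution in signed arithmetic: previously the root bonus and a negative oom_score_adj were applied to an unsigned points value and could underflow to a huge score; now the adjustment is normalized separately and the final clamp guarantees an eligible task never scores below 1. A standalone model of the calculation with made-up numbers (the real function derives the base from rss, swap and page-table pages and returns 0 earlier for OOM_SCORE_ADJ_MIN):

#include <stdio.h>

static long badness(long base_pages, long oom_score_adj, int has_cap_sys_admin,
		    long totalpages)
{
	long points = base_pages;
	long adj = oom_score_adj;

	if (has_cap_sys_admin)
		adj -= 30;                      /* root bonus, in oom_score_adj units */
	adj *= totalpages / 1000;               /* normalize to page units */
	points += adj;

	return points > 0 ? points : 1;         /* eligible tasks never score 0 */
}

int main(void)
{
	printf("%ld\n", badness(5000, -1000, 0, 1000000));   /* clamps to 1, no underflow */
	printf("%ld\n", badness(5000,   100, 1, 1000000));   /* 5000 + 70 * 1000 = 75000  */
	return 0;
}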
 
 /*
@@ -366,7 +365,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @mem: current's memory controller, if constrained
+ * @memcg: current's memory controller, if constrained
  * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
index 6092f331b32e496bb522f4f17741c3594cc4ba0d..4a4f9219683f63d8594c143fa24f27d4daa6a8ff 100644 (file)
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-void set_pageblock_migratetype(struct page *page, int migratetype)
+static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
        if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page,
-                        int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -5635,7 +5635,12 @@ static struct page *
 __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
                             int **resultp)
 {
-       return alloc_page(GFP_HIGHUSER_MOVABLE);
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+       if (PageHighMem(page))
+               gfp_mask |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_mask);
 }
 
 /* [start, end) must belong to a single zone. */
@@ -5651,7 +5656,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 1ccbd714059cdd7ddb9d90cb3ca92438f2cb5d34..eb750f851395b4e726c06f3926d42b472ab1bdf9 100644 (file)
@@ -392,7 +392,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
 
 /**
  * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
- * @end: swap entry to be cmpxchged
+ * @ent: swap entry to be cmpxchged
  * @old: old id
  * @new: new id
  *
@@ -422,7 +422,7 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 /**
  * swap_cgroup_record - record mem_cgroup for this swp_entry.
  * @ent: swap entry to be recorded into
- * @mem: mem_cgroup to be recorded
+ * @id: mem_cgroup to be recorded
  *
  * Returns old value at success, 0 at failure.
  * (Of course, old value can be 0.)
index dc76b4d0611ecb59fd85d89a78896c792443a62f..34f02923744c921fa2d990ec68f220a698049362 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/swapops.h>
 #include <linux/writeback.h>
+#include <linux/frontswap.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                unlock_page(page);
                goto out;
        }
+       if (frontswap_store(page) == 0) {
+               set_page_writeback(page);
+               unlock_page(page);
+               end_page_writeback(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
@@ -122,6 +129,11 @@ int swap_readpage(struct page *page)
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
+       if (frontswap_load(page) == 0) {
+               SetPageUptodate(page);
+               unlock_page(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
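Both hooks above give frontswap the first shot at the swap page: a successful frontswap_store()/frontswap_load() completely replaces the bio path, while a failure falls through to the normal block I/O unchanged. Restating the write-side ordering from the swap_writepage() hunk, with the reasoning spelled out as comments (no new code beyond the hunk):

	if (frontswap_store(page) == 0) {
		/* the backend holds the data, but the page must still run
		 * through the writeback state machine so waiters observe a
		 * normal begin/end cycle even though no bio is issued */
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	/* otherwise: get_swap_bio() + submit_bio() as before */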
index aa9701e12714af2ce7ead752def02200910bf2b1..6c118d012bb5a27be54ab331a4491e3ec421ac31 100644 (file)
@@ -162,7 +162,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm: memory map to walk
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
index 405d331804c3da95c52347878387a807eb5e6c60..3707c71ae4cddbec027eac857291185c662c760a 100644 (file)
@@ -360,7 +360,6 @@ err_free:
  * @chunk: chunk to depopulate
  * @off: offset to the area to depopulate
  * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.  If @flush is true, vcache is flushed before unmapping
index c20ff48994c29050953c79fcdb0633e690bb653e..926b466497492f3f8463ebc623adc4fbddf9547a 100644 (file)
@@ -371,15 +371,15 @@ static ssize_t process_vm_rw(pid_t pid,
        /* Check iovecs */
        if (vm_write)
                rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l, 1);
+                                          iovstack_l, &iov_l);
        else
                rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l, 1);
+                                          iovstack_l, &iov_l);
        if (rc <= 0)
                goto free_iovecs;
 
-       rc = rw_copy_check_uvector(READ, rvec, riovcnt, UIO_FASTIOV,
-                                  iovstack_r, &iov_r, 0);
+       rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
+                                  iovstack_r, &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
@@ -438,16 +438,16 @@ compat_process_vm_rw(compat_pid_t pid,
        if (vm_write)
                rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
-                                                 &iov_l, 1);
+                                                 &iov_l);
        else
                rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
-                                                 &iov_l, 1);
+                                                 &iov_l);
        if (rc <= 0)
                goto free_iovecs;
-       rc = compat_rw_copy_check_uvector(READ, rvec, riovcnt,
+       rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
-                                         &iov_r, 0);
+                                         &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
index d576b84d913c40c89232b5709360b6e4abce4dad..bd106361be4bf2d080e67f11252198c710745278 100644 (file)
@@ -263,6 +263,24 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
        return 0;
 }
 
+/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+                              pgoff_t index, swp_entry_t swap)
+{
+       void *item;
+
+       rcu_read_lock();
+       item = radix_tree_lookup(&mapping->page_tree, index);
+       rcu_read_unlock();
+       return item == swp_to_radix_entry(swap);
+}
+
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -270,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp, void *expected)
 {
-       int error = 0;
+       int error;
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapBacked(page));
 
+       page_cache_get(page);
+       page->mapping = mapping;
+       page->index = index;
+
+       spin_lock_irq(&mapping->tree_lock);
        if (!expected)
-               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               error = radix_tree_insert(&mapping->page_tree, index, page);
+       else
+               error = shmem_radix_tree_replace(mapping, index, expected,
+                                                                page);
        if (!error) {
-               page_cache_get(page);
-               page->mapping = mapping;
-               page->index = index;
-
-               spin_lock_irq(&mapping->tree_lock);
-               if (!expected)
-                       error = radix_tree_insert(&mapping->page_tree,
-                                                       index, page);
-               else
-                       error = shmem_radix_tree_replace(mapping, index,
-                                                       expected, page);
-               if (!error) {
-                       mapping->nrpages++;
-                       __inc_zone_page_state(page, NR_FILE_PAGES);
-                       __inc_zone_page_state(page, NR_SHMEM);
-                       spin_unlock_irq(&mapping->tree_lock);
-               } else {
-                       page->mapping = NULL;
-                       spin_unlock_irq(&mapping->tree_lock);
-                       page_cache_release(page);
-               }
-               if (!expected)
-                       radix_tree_preload_end();
+               mapping->nrpages++;
+               __inc_zone_page_state(page, NR_FILE_PAGES);
+               __inc_zone_page_state(page, NR_SHMEM);
+               spin_unlock_irq(&mapping->tree_lock);
+       } else {
+               page->mapping = NULL;
+               spin_unlock_irq(&mapping->tree_lock);
+               page_cache_release(page);
        }
-       if (error)
-               mem_cgroup_uncharge_cache_page(page);
        return error;
 }
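shmem_add_to_page_cache() above is slimmed down: the radix-tree preload and the memcg uncharge-on-failure move out to the callers, and the function now assumes the caller has already pinned down which swap entry (if any) it expects to replace. The later shmem_getpage_gfp() hunks show the caller-side pattern this relies on; roughly (illustrative excerpt of those hunks, not new code):

	lock_page(page);
	if (!PageSwapCache(page) ||
	    page_private(page) != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;	/* raced with swap replacement: retry */

All three checks are needed because the page lock alone does not tie the page to this inode and offset once the swap entry can be reused.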
 
@@ -683,10 +692,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
-                * allocation; but the inode might already be freed by now,
-                * and we cannot refer to inode or mapping or info to check.
-                * However, we do hold page lock on the PageSwapCache page,
-                * so can check if that still has our reference remaining.
+                * allocation, but the inode might have been freed while we
+                * dropped it: although a racing shmem_evict_inode() cannot
+                * complete without emptying the radix_tree, our page lock
+                * on this swapcache page is not enough to prevent that -
+                * free_swap_and_cache() of our swap entry will only
+                * trylock_page(), removing swap from radix_tree whatever.
+                *
+                * We must not proceed to shmem_add_to_page_cache() if the
+                * inode has been freed, but of course we cannot rely on
+                * inode or mapping or info to check that.  However, we can
+                * safely check if our swap entry is still in use (and here
+                * it can't have got reused for another page): if it's still
+                * in use, then the inode cannot have been freed yet, and we
+                * can safely proceed (if it's no longer in use, that tells
+                * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
@@ -730,9 +750,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 
        /*
         * There's a faint possibility that swap page was replaced before
-        * caller locked it: it will come back later with the right page.
+        * caller locked it: caller will come back later with the right page.
         */
-       if (unlikely(!PageSwapCache(page)))
+       if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;
 
        /*
@@ -995,21 +1015,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        newpage = shmem_alloc_page(gfp, info, index);
        if (!newpage)
                return -ENOMEM;
-       VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
 
-       *pagep = newpage;
        page_cache_get(newpage);
        copy_highpage(newpage, oldpage);
+       flush_dcache_page(newpage);
 
-       VM_BUG_ON(!PageLocked(oldpage));
        __set_page_locked(newpage);
-       VM_BUG_ON(!PageUptodate(oldpage));
        SetPageUptodate(newpage);
-       VM_BUG_ON(!PageSwapBacked(oldpage));
        SetPageSwapBacked(newpage);
-       VM_BUG_ON(!swap_index);
        set_page_private(newpage, swap_index);
-       VM_BUG_ON(!PageSwapCache(oldpage));
        SetPageSwapCache(newpage);
 
        /*
@@ -1019,13 +1033,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        spin_lock_irq(&swap_mapping->tree_lock);
        error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                                                   newpage);
-       __inc_zone_page_state(newpage, NR_FILE_PAGES);
-       __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+       if (!error) {
+               __inc_zone_page_state(newpage, NR_FILE_PAGES);
+               __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+       }
        spin_unlock_irq(&swap_mapping->tree_lock);
-       BUG_ON(error);
 
-       mem_cgroup_replace_page_cache(oldpage, newpage);
-       lru_cache_add_anon(newpage);
+       if (unlikely(error)) {
+               /*
+                * Is this possible?  I think not, now that our callers check
+                * both PageSwapCache and page_private after getting page lock;
+                * but be defensive.  Reverse old to newpage for clear and free.
+                */
+               oldpage = newpage;
+       } else {
+               mem_cgroup_replace_page_cache(oldpage, newpage);
+               lru_cache_add_anon(newpage);
+               *pagep = newpage;
+       }
 
        ClearPageSwapCache(oldpage);
        set_page_private(oldpage, 0);
@@ -1033,7 +1058,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        unlock_page(oldpage);
        page_cache_release(oldpage);
        page_cache_release(oldpage);
-       return 0;
+       return error;
 }
 
 /*
@@ -1107,9 +1132,10 @@ repeat:
 
                /* We have to do this with page locked to prevent races */
                lock_page(page);
-               if (!PageSwapCache(page) || page->mapping) {
+               if (!PageSwapCache(page) || page_private(page) != swap.val ||
+                   !shmem_confirm_swap(mapping, index, swap)) {
                        error = -EEXIST;        /* try again */
-                       goto failed;
+                       goto unlock;
                }
                if (!PageUptodate(page)) {
                        error = -EIO;
@@ -1125,9 +1151,12 @@ repeat:
 
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
+               if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
+                       /* We already confirmed swap, and make no allocation */
+                       VM_BUG_ON(error);
+               }
                if (error)
                        goto failed;
 
@@ -1164,11 +1193,18 @@ repeat:
                __set_page_locked(page);
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
-                       error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, NULL);
                if (error)
                        goto decused;
+               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               if (!error) {
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                                       gfp, NULL);
+                       radix_tree_preload_end();
+               }
+               if (error) {
+                       mem_cgroup_uncharge_cache_page(page);
+                       goto decused;
+               }
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -1228,14 +1264,10 @@ decused:
 unacct:
        shmem_unacct_blocks(info->flags, 1);
 failed:
-       if (swap.val && error != -EINVAL) {
-               struct page *test = find_get_page(mapping, index);
-               if (test && !radix_tree_exceptional_entry(test))
-                       page_cache_release(test);
-               /* Have another try if the entry has changed */
-               if (test != swp_to_radix_entry(swap))
-                       error = -EEXIST;
-       }
+       if (swap.val && error != -EINVAL &&
+           !shmem_confirm_swap(mapping, index, swap))
+               error = -EEXIST;
+unlock:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
@@ -1247,7 +1279,7 @@ failed:
                spin_unlock(&info->lock);
                goto repeat;
        }
-       if (error == -EEXIST)
+       if (error == -EEXIST)   /* from above or from radix_tree_insert */
                goto repeat;
        return error;
 }
@@ -1577,6 +1609,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1665,7 +1698,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1674,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        return error;
 }
 
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
-                                   pgoff_t index, pgoff_t end, int origin)
-{
-       struct page *page;
-       struct pagevec pvec;
-       pgoff_t indices[PAGEVEC_SIZE];
-       bool done = false;
-       int i;
-
-       pagevec_init(&pvec, 0);
-       pvec.nr = 1;            /* start small: we may be there already */
-       while (!done) {
-               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                                       pvec.nr, pvec.pages, indices);
-               if (!pvec.nr) {
-                       if (origin == SEEK_DATA)
-                               index = end;
-                       break;
-               }
-               for (i = 0; i < pvec.nr; i++, index++) {
-                       if (index < indices[i]) {
-                               if (origin == SEEK_HOLE) {
-                                       done = true;
-                                       break;
-                               }
-                               index = indices[i];
-                       }
-                       page = pvec.pages[i];
-                       if (page && !radix_tree_exceptional_entry(page)) {
-                               if (!PageUptodate(page))
-                                       page = NULL;
-                       }
-                       if (index >= end ||
-                           (page && origin == SEEK_DATA) ||
-                           (!page && origin == SEEK_HOLE)) {
-                               done = true;
-                               break;
-                       }
-               }
-               shmem_deswap_pagevec(&pvec);
-               pagevec_release(&pvec);
-               pvec.nr = PAGEVEC_SIZE;
-               cond_resched();
-       }
-       return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
-       struct address_space *mapping;
-       struct inode *inode;
-       pgoff_t start, end;
-       loff_t new_offset;
-
-       if (origin != SEEK_DATA && origin != SEEK_HOLE)
-               return generic_file_llseek_size(file, offset, origin,
-                                                       MAX_LFS_FILESIZE);
-       mapping = file->f_mapping;
-       inode = mapping->host;
-       mutex_lock(&inode->i_mutex);
-       /* We're holding i_mutex so we can access i_size directly */
-
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
-               offset = -ENXIO;
-       else {
-               start = offset >> PAGE_CACHE_SHIFT;
-               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               new_offset = shmem_seek_hole_data(mapping, start, end, origin);
-               new_offset <<= PAGE_CACHE_SHIFT;
-               if (new_offset > offset) {
-                       if (new_offset < inode->i_size)
-                               offset = new_offset;
-                       else if (origin == SEEK_DATA)
-                               offset = -ENXIO;
-                       else
-                               offset = inode->i_size;
-               }
-       }
-
-       if (offset >= 0 && offset != file->f_pos) {
-               file->f_pos = offset;
-               file->f_version = 0;
-       }
-       mutex_unlock(&inode->i_mutex);
-       return offset;
-}
-
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
@@ -2439,11 +2380,9 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
        return dentry;
 }
 
-static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
-                               int connectable)
+static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
+                               struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
-
        if (*len < 3) {
                *len = 3;
                return 255;
@@ -2771,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
-       .llseek         = shmem_file_llseek,
+       .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
index 80848cd3901cc8ab052f43d52e2415fb3b7f4829..8c691fa1cf3c78a91fa301bcd0cd323df1b28a1e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
-       page->flags |= 1 << PG_slab;
+       __SetPageSlab(page);
 
        start = page_address(page);
 
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
                freelist = page->freelist;
                counters = page->counters;
                new.counters = counters;
-               if (mode)
+               if (mode) {
                        new.inuse = page->objects;
+                       new.freelist = NULL;
+               } else {
+                       new.freelist = freelist;
+               }
 
                VM_BUG_ON(new.frozen);
                new.frozen = 1;
 
        } while (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
-                       NULL, new.counters,
+                       new.freelist, new.counters,
                        "lock and freeze"));
 
        remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
                        object = t;
                        available =  page->objects - page->inuse;
                } else {
-                       page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
 #endif
-       init_kmem_cache_node(n, kmem_cache_node);
+       init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
        add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
                }
 
                s->node[node] = n;
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
        }
        return 1;
 }
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
                        ret = -ENOMEM;
                        goto out;
                }
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
                s->node[nid] = n;
        }
 out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                        }
                        return s;
                }
-               kfree(n);
                kfree(s);
        }
+       kfree(n);
 err:
        up_write(&slub_lock);
 
index 6a4bf9160e855ae1e2d61fefb4922918f710bb24..c7bb952400c83c9114a401f647955cb18dd2ea44 100644 (file)
@@ -275,8 +275,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       pg_data_t *host_pgdat;
-       unsigned long goal;
+       unsigned long goal, limit;
+       unsigned long *p;
+       int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & PAGE_SECTION_MASK;
-       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-       return __alloc_bootmem_node_nopanic(host_pgdat, size,
-                                           SMP_CACHE_BYTES, goal);
+       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       limit = goal + (1UL << PA_SECTION_SHIFT);
+       nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+       if (!p && limit) {
+               limit = 0;
+               goto again;
+       }
+       return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
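sparse_early_usemaps_alloc_pgdat_section() above now computes an explicit [goal, limit) window covering the memory section that holds the pgdat, tries the allocation there first, and only when that fails retries with limit 0 (no constraint). A standalone sketch of the section-aligned window, using an illustrative section size (the real PA_SECTION_SHIFT is architecture dependent):

#include <stdio.h>

#define PA_SECTION_SHIFT 27UL                   /* illustrative: 128 MiB sections */

int main(void)
{
	unsigned long pgdat_pa = 0x12345678UL;  /* made-up physical address of the pgdat */
	unsigned long goal  = pgdat_pa & ~((1UL << PA_SECTION_SHIFT) - 1);
	unsigned long limit = goal + (1UL << PA_SECTION_SHIFT);

	/* first attempt confined to the pgdat's own section, so freeing
	 * the usemap later cannot pin an unrelated section */
	printf("goal=%#lx limit=%#lx\n", goal, limit);
	return 0;
}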
index 457b10baef59414f591fb0bfab2b54619c5209b0..71373d03fcee99d57c29d24a25db888228bf3417 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/memcontrol.h>
 #include <linux/poll.h>
 #include <linux/oom.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 static void free_swap_count_continuations(struct swap_info_struct *);
 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
 
-static DEFINE_SPINLOCK(swap_lock);
+DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 long nr_swap_pages;
 long total_swap_pages;
@@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
 static const char Unused_offset[] = "Unused swap offset entry ";
 
-static struct swap_list_t swap_list = {-1, -1};
+struct swap_list_t swap_list = {-1, -1};
 
-static struct swap_info_struct *swap_info[MAX_SWAPFILES];
+struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
@@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
                        swap_list.next = p->type;
                nr_swap_pages++;
                p->inuse_pages--;
+               frontswap_invalidate_page(p->type, offset);
                if ((p->flags & SWP_BLKDEV) &&
                                disk->fops->swap_slot_free_notify)
                        disk->fops->swap_slot_free_notify(p->bdev, offset);
@@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm,
 }
 
 /*
- * Scan swap_map from current position to next entry still in use.
+ * Scan swap_map (or frontswap_map if frontswap parameter is true)
+ * from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
  */
 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
-                                       unsigned int prev)
+                                       unsigned int prev, bool frontswap)
 {
        unsigned int max = si->max;
        unsigned int i = prev;
@@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
                        prev = 0;
                        i = 1;
                }
+               if (frontswap) {
+                       if (frontswap_test(si, i))
+                               break;
+                       else
+                               continue;
+               }
                count = si->swap_map[i];
                if (count && swap_count(count) != SWAP_MAP_BAD)
                        break;
@@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * We completely avoid races by reading each swap page in advance,
  * and then search for the process using it.  All the necessary
  * page table adjustments can then be made atomically.
+ *
+ * if the boolean frontswap is true, only unuse pages_to_unuse pages;
+ * pages_to_unuse==0 means all pages; ignored if frontswap is false
  */
-static int try_to_unuse(unsigned int type)
+int try_to_unuse(unsigned int type, bool frontswap,
+                unsigned long pages_to_unuse)
 {
        struct swap_info_struct *si = swap_info[type];
        struct mm_struct *start_mm;
@@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type)
         * one pass through swap_map is enough, but not necessarily:
         * there are races when an instance of an entry might be missed.
         */
-       while ((i = find_next_to_unuse(si, i)) != 0) {
+       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
                if (signal_pending(current)) {
                        retval = -EINTR;
                        break;
@@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type)
                 * interactive performance.
                 */
                cond_resched();
+               if (frontswap && pages_to_unuse > 0) {
+                       if (!--pages_to_unuse)
+                               break;
+               }
        }
 
        mmput(start_mm);
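With the change above, try_to_unuse() can run in a frontswap mode: find_next_to_unuse() then visits only offsets whose frontswap bit is set, and the loop stops after pages_to_unuse pages (0 meaning no limit). The swapoff path keeps its old behaviour by passing (false, 0); a frontswap shrinker could use the partial mode roughly like this (the second call is an illustrative sketch, not taken from this patch):

	err = try_to_unuse(type, false, 0);	/* swapoff: bring everything back */
	err = try_to_unuse(type, true, 64);	/* pull ~64 frontswap-backed pages back in */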
@@ -1486,7 +1504,8 @@ bad_bmap:
 }
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
-                               unsigned char *swap_map)
+                               unsigned char *swap_map,
+                               unsigned long *frontswap_map)
 {
        int i, prev;
 
@@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
        else
                p->prio = --least_priority;
        p->swap_map = swap_map;
+       frontswap_map_set(p, frontswap_map);
        p->flags |= SWP_WRITEOK;
        nr_swap_pages += p->pages;
        total_swap_pages += p->pages;
@@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
                swap_list.head = swap_list.next = p->type;
        else
                swap_info[prev]->next = p->type;
+       frontswap_init(p->type);
        spin_unlock(&swap_lock);
 }
 
@@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        spin_unlock(&swap_lock);
 
        oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
-       err = try_to_unuse(type);
+       err = try_to_unuse(type, false, 0); /* force all pages to be unused */
        compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
 
        if (err) {
@@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
                 * sys_swapoff for this swap_info_struct at this point.
                 */
                /* re-insert swap space back into swap_list */
-               enable_swap_info(p, p->prio, p->swap_map);
+               enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
                goto out_dput;
        }
 
@@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        swap_map = p->swap_map;
        p->swap_map = NULL;
        p->flags = 0;
+       frontswap_invalidate_area(type);
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
        vfree(swap_map);
+       vfree(frontswap_map_get(p));
        /* Destroy swap account information */
        swap_cgroup_swapoff(type);
 
@@ -1893,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 
        /*
         * Find out how many pages are allowed for a single swap
-        * device. There are three limiting factors: 1) the number
+        * device. There are two limiting factors: 1) the number
         * of bits for the swap offset in the swp_entry_t type, and
         * 2) the number of bits in the swap pte as defined by the
-        * the different architectures, and 3) the number of free bits
-        * in an exceptional radix_tree entry. In order to find the
+        * different architectures. In order to find the
         * largest possible bit mask, a swap entry with swap type 0
         * and swap offset ~0UL is created, encoded to a swap pte,
         * decoded to a swp_entry_t again, and finally the swap
         * offset is extracted. This will mask all the bits from
         * the initial ~0UL mask that can't be encoded in either
         * the swp_entry_t or the architecture definition of a
-        * swap pte.  Then the same is done for a radix_tree entry.
+        * swap pte.
         */
        maxpages = swp_offset(pte_to_swp_entry(
-                       swp_entry_to_pte(swp_entry(0, ~0UL))));
-       maxpages = swp_offset(radix_to_swp_entry(
-                       swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
-
+                       swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
        if (maxpages > swap_header->info.last_page) {
                maxpages = swap_header->info.last_page + 1;
                /* p->max is an unsigned int: don't overflow it */
@@ -1988,6 +2007,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        sector_t span;
        unsigned long maxpages;
        unsigned char *swap_map = NULL;
+       unsigned long *frontswap_map = NULL;
        struct page *page = NULL;
        struct inode *inode = NULL;
 
@@ -2071,6 +2091,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                error = nr_extents;
                goto bad_swap;
        }
+       /* frontswap enabled? set up bit-per-page map for frontswap */
+       if (frontswap_enabled)
+               frontswap_map = vzalloc(maxpages / sizeof(long));
 
        if (p->bdev) {
                if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
@@ -2086,14 +2109,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        if (swap_flags & SWAP_FLAG_PREFER)
                prio =
                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-       enable_swap_info(p, prio, swap_map);
+       enable_swap_info(p, prio, swap_map, frontswap_map);
 
        printk(KERN_INFO "Adding %uk swap on %s.  "
-                       "Priority:%d extents:%d across:%lluk %s%s\n",
+                       "Priority:%d extents:%d across:%lluk %s%s%s\n",
                p->pages<<(PAGE_SHIFT-10), name, p->prio,
                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
-               (p->flags & SWP_DISCARDABLE) ? "D" : "");
+               (p->flags & SWP_DISCARDABLE) ? "D" : "",
+               (frontswap_map) ? "FS" : "");
 
        mutex_unlock(&swapon_mutex);
        atomic_inc(&proc_poll_event);
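
The swapfile.c hunks above thread a frontswap map through swapon/swapoff: a bit-per-page bitmap allocated at swapon time, handed to enable_swap_info(), reported as "FS" in the log line, and invalidated and freed again at swapoff. As a rough illustration (not part of the patch) of how such a bit-per-page map is conventionally handled with the stock kernel bitmap helpers, consider the sketch below; the demo_* names are hypothetical, and the BITS_TO_LONGS() sizing is the usual idiom rather than the expression used in the hunk.

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static unsigned long *demo_map_alloc(unsigned long maxpages)
{
        /* one bit per swap page, rounded up to whole longs */
        return vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
}

static void demo_map_mark_stored(unsigned long *map, pgoff_t offset)
{
        set_bit(offset, map);           /* this swap offset now lives in frontswap */
}

static int demo_map_is_stored(unsigned long *map, pgoff_t offset)
{
        return test_bit(offset, map);
}

static void demo_map_free(unsigned long *map)
{
        vfree(map);                     /* matches the vfree() added to swapoff above */
}
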
index ae962b31de888a55990769aae948bac3ef0db338..8c7265afa29f2109b884907daa050b79f0b25f8b 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -341,6 +342,35 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot,
+       unsigned long flag, unsigned long pgoff)
+{
+       unsigned long ret;
+       struct mm_struct *mm = current->mm;
+
+       ret = security_mmap_file(file, prot, flag);
+       if (!ret) {
+               down_write(&mm->mmap_sem);
+               ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
+               up_write(&mm->mmap_sem);
+       }
+       return ret;
+}
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot,
+       unsigned long flag, unsigned long offset)
+{
+       if (unlikely(offset + PAGE_ALIGN(len) < offset))
+               return -EINVAL;
+       if (unlikely(offset & ~PAGE_MASK))
+               return -EINVAL;
+
+       return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(vm_mmap);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
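
The mm/util.c hunk above adds vm_mmap_pgoff() and vm_mmap() so in-kernel callers no longer have to take mmap_sem and call security_mmap_file() by hand. A minimal, hypothetical caller might look like the sketch below; the function and variable names are made up for illustration.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>

static unsigned long demo_map_file(struct file *filp, unsigned long len)
{
        unsigned long addr;

        addr = vm_mmap(filp, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
        if (IS_ERR_VALUE(addr))
                return 0;       /* vm_mmap() returns a negative errno on failure */

        return addr;
}
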
index eeb3bc9d1d361b6f20821073485f1b8e7c4931d3..66e431060c05616ace60c4d512e4b96e785723ae 100644 (file)
@@ -2688,7 +2688,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
                 * them before going back to sleep.
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-               schedule();
+
+               if (!kthread_should_stop())
+                       schedule();
+
                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
        } else {
                if (remaining)
@@ -2955,14 +2958,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined.  Caller must
+ * hold lock_memory_hotplug().
  */
 void kswapd_stop(int nid)
 {
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-       if (kswapd)
+       if (kswapd) {
                kthread_stop(kswapd);
+               NODE_DATA(nid)->kswapd = NULL;
+       }
 }
 
 static int __init kswapd_init(void)
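
The vmscan.c hunks above make kswapd follow the usual kthread stop protocol: re-check kthread_should_stop() before sleeping so a stop request is not slept through, and clear the cached task pointer once kthread_stop() returns. Below is a generic sketch of that protocol with hypothetical names, not tied to kswapd itself.

#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())     /* do not sleep through a pending stop */
                        schedule();
                __set_current_state(TASK_RUNNING);
                /* ... do the periodic work here ... */
        }
        return 0;
}

static int demo_start(void)
{
        demo_task = kthread_run(demo_thread_fn, NULL, "demo_thread");
        if (IS_ERR(demo_task)) {
                int err = PTR_ERR(demo_task);

                demo_task = NULL;
                return err;
        }
        return 0;
}

static void demo_stop(void)
{
        if (demo_task) {
                kthread_stop(demo_task);
                demo_task = NULL;       /* mirrors NODE_DATA(nid)->kswapd = NULL above */
        }
}
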
index 6089f0cf23b480e4686916823cecafd681a64f42..9096bcb081326c92bb64b6dedb11033073998ca5 100644 (file)
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                break;
 
        case NETDEV_DOWN:
+               if (dev->features & NETIF_F_HW_VLAN_FILTER)
+                       vlan_vid_del(dev, 0);
+
                /* Put all VLANs for this dev in the down state too.  */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
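
The 8021q hunk above runs inside a netdevice notifier and reacts to NETDEV_DOWN by dropping VID 0 from devices that do hardware VLAN filtering. For reference, the surrounding notifier pattern looks roughly like the sketch below; the names are hypothetical, and on kernels of this vintage the notifier pointer argument is the net_device itself.

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_device_event(struct notifier_block *nb, unsigned long event,
                             void *ptr)
{
        struct net_device *dev = ptr;   /* on 3.x kernels the pointer is the net_device */

        switch (event) {
        case NETDEV_DOWN:
                pr_info("demo: %s went down\n", dev->name);
                /* tear down per-device state, e.g. drop hardware VLAN filters */
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
        .notifier_call = demo_device_event,
};

/* at module init: register_netdevice_notifier(&demo_notifier); */
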
index 9ee48cb3017998f47928a3d8413c5df44ec4ac73..3d33ecf133271426aec0df569c5b2a4e8eb03a8a 100644 (file)
@@ -368,7 +368,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                const char *sptr = va_arg(ap, const char *);
                                uint16_t len = 0;
                                if (sptr)
-                                       len = min_t(uint16_t, strlen(sptr),
+                                       len = min_t(size_t, strlen(sptr),
                                                                USHRT_MAX);
 
                                errcode = p9pdu_writef(pdu, proto_version,
index 5af18d11b5184805bf7a01c5d94d0759b21c4992..2a167658bb958ae3a302bd80a46649416c697183 100644 (file)
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
                s = rest_of_page(data);
                if (s > count)
                        s = count;
+               BUG_ON(index > limit);
                sg_set_buf(&sg[index++], data, s);
                count -= s;
                data += s;
-               BUG_ON(index > limit);
        }
 
        return index-start;
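
The 9p hunk above moves the BUG_ON(index > limit) ahead of sg_set_buf() so the overrun is caught before the scatterlist table is written past its end. Here is a sketch of the same check-before-write pattern with hypothetical names, returning an error instead of crashing.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static int demo_pack_buf(struct scatterlist *sg, int limit,
                         char *data, int count)
{
        int index = 0;

        while (count) {
                int s = min_t(int, count, PAGE_SIZE);

                if (index >= limit)     /* would overrun the sg table */
                        return -ENOSPC;
                sg_set_buf(&sg[index++], data, s);
                data += s;
                count -= s;
        }

        return index;   /* number of entries consumed */
}
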
index 0301b328cf0fe04cf39f302ab6061bdbc288c42b..86852963b7f708b92e4596c63a2a2960d8676cda 100644 (file)
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
        if (addr->sat_addr.s_node == ATADDR_BCAST &&
            !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-               printk(KERN_WARNING "%s is broken and did not set "
-                                   "SO_BROADCAST. It will break when 2.2 is "
-                                   "released.\n",
+               pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
                        current->comm);
 #else
                return -EACCES;
index 051f7abae66d987177536637c235865258cabbec..779095ded689918de025f48c47db880aa9b5d724 100644 (file)
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
                case AX25_P_NETROM:
                        if (ax25_protocol_is_registered(AX25_P_NETROM))
                                return -ESOCKTNOSUPPORT;
+                       break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
                case AX25_P_ROSE:
index 8bf97515a77d6f3ccfaae8ab3d89e1eaed46013a..c5863f499133b466787b8b5044882c4ddded194a 100644 (file)
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: true if the packet arrived in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
  * process the skb.
  *
  */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast)
 {
        struct ethhdr *ethhdr;
        struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 
        if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
                /* don't allow broadcasts while requests are in flight */
-               if (is_multicast_ether_addr(ethhdr->h_dest))
+               if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
 
        memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
        }
 
        /* if it is a broadcast ... */
-       if (is_multicast_ether_addr(ethhdr->h_dest)) {
-               /* ... drop it. the responsible gateway is in charge. */
+       if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+               /* ... drop it. the responsible gateway is in charge.
+                *
+                * We need to check is_bcast because with the gateway
+                * feature, broadcasts (like DHCP requests) may be sent
+                * using a unicast packet type.
+                */
                goto handled;
        } else {
                /* seems the client considers us as its best gateway.
index e39f93acc28f749793200be5b020df41fd5554e6..dc5227b398d44c7b57eda34adc4b53b9a1281f9a 100644 (file)
@@ -23,7 +23,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast);
 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 int bla_is_backbone_gw(struct sk_buff *skb,
                       struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
-                        short vid)
+                        short vid, bool is_bcast)
 {
        return 0;
 }
index 840e2c64a301156c7b343468bedc65282f8c9b6b..015471d801b42eceb554c0ed86d6a49971389774 100644 (file)
@@ -617,6 +617,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                         * changes */
                        if (skb_linearize(skb) < 0)
                                goto out;
+                       /* skb_linearize() possibly changed skb->data */
+                       tt_query = (struct tt_query_packet *)skb->data;
 
                        tt_len = tt_query->tt_data * sizeof(struct tt_change);
 
index 6e2530b020437e7243ff2a71d6098a11d7d5df52..a0ec0e4ada4c0acf9392136cff17198b766acd1f 100644 (file)
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
+       struct batman_header *batadv_header = (struct batman_header *)skb->data;
        short vid __maybe_unused = -1;
+       bool is_bcast;
+
+       is_bcast = (batadv_header->packet_type == BAT_BCAST);
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
        /* Let the bridge loop avoidance check the packet. If it will
         * not handle it, we can safely push it up.
         */
-       if (bla_rx(bat_priv, skb, vid))
+       if (bla_rx(bat_priv, skb, vid, is_bcast))
                goto out;
 
        netif_rx(skb);
index a66c2dcd108800e16f0280f5f42537c9f12d2699..2ab83d7fb1f84c864d5bb2a49295e9b4f8071811 100644 (file)
@@ -141,13 +141,14 @@ static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        struct tt_orig_list_entry *orig_entry;
 
        orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
-       atomic_dec(&orig_entry->orig_node->tt_size);
        orig_node_free_ref(orig_entry->orig_node);
        kfree(orig_entry);
 }
 
 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
 {
+       /* to avoid race conditions, immediately decrease the tt counter */
+       atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
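
The translation-table hunks here pull the tt_size decrement out of the RCU callback and into tt_orig_list_entry_free_ref(): call_rcu() only defers the memory release, so bookkeeping that other CPUs read must be updated immediately. A generic sketch of that split, with hypothetical structure and function names, follows.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
        struct rcu_head rcu;
        atomic_t *counter;              /* shared counter this entry contributes to */
};

static void demo_entry_free_rcu(struct rcu_head *rcu)
{
        struct demo_entry *entry = container_of(rcu, struct demo_entry, rcu);

        kfree(entry);                   /* memory release only, after the grace period */
}

static void demo_entry_free_ref(struct demo_entry *entry)
{
        atomic_dec(entry->counter);     /* visible bookkeeping, done immediately */
        call_rcu(&entry->rcu, demo_entry_free_rcu);
}
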
 
@@ -910,7 +911,6 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       atomic_set(&orig_node->tt_size, 0);
        orig_node->tt_initialised = false;
 }
 
@@ -2031,10 +2031,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
 {
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
-       bool ret = true;
+       bool ret = false;
 
        if (!atomic_read(&bat_priv->ap_isolation))
-               return false;
+               goto out;
 
        tt_local_entry = tt_local_hash_find(bat_priv, dst);
        if (!tt_local_entry)
@@ -2044,10 +2044,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
        if (!tt_global_entry)
                goto out;
 
-       if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+       if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;
 
-       ret = false;
+       ret = true;
 
 out:
        if (tt_global_entry)
index 46e7f86acfc99f820b66564f553dc64fe8fbcbac..3e18af4dadc442573960b94abefed550212b9b47 100644 (file)
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
                }
 
                if (sk->sk_state == BT_CONNECTED || !newsock ||
-                   test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
+                   test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
index 4eefb7f65cf62e6409fb5b67d0fed541eed5b541..94ad124a4ea3496c4bd971b3bc4ac9d4dda8860f 100644 (file)
@@ -3043,6 +3043,50 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        hci_dev_unlock(hdev);
 }
 
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+                                        struct sk_buff *skb)
+{
+       struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+              __le16_to_cpu(ev->handle));
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+       if (!conn)
+               goto unlock;
+
+       if (!ev->status)
+               conn->sec_level = conn->pending_sec_level;
+
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+       if (ev->status && conn->state == BT_CONNECTED) {
+               hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+               hci_conn_put(conn);
+               goto unlock;
+       }
+
+       if (conn->state == BT_CONFIG) {
+               if (!ev->status)
+                       conn->state = BT_CONNECTED;
+
+               hci_proto_connect_cfm(conn, ev->status);
+               hci_conn_put(conn);
+       } else {
+               hci_auth_cfm(conn, ev->status);
+
+               hci_conn_hold(conn);
+               conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+               hci_conn_put(conn);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static inline u8 hci_get_auth_req(struct hci_conn *conn)
 {
        /* If remote requests dedicated bonding follow that lead */
@@ -3559,6 +3603,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_extended_inquiry_result_evt(hdev, skb);
                break;
 
+       case HCI_EV_KEY_REFRESH_COMPLETE:
+               hci_key_refresh_complete_evt(hdev, skb);
+               break;
+
        case HCI_EV_IO_CAPA_REQUEST:
                hci_io_capa_request_evt(hdev, skb);
                break;
index 4deaca78e91ea1853027db58f05441e8e8a3975f..9332bc7aa851fb798533cd5695fd260de68c242f 100644 (file)
@@ -1,6 +1,6 @@
 config BT_HIDP
        tristate "HIDP protocol support"
-       depends on BT && INPUT && HID_SUPPORT
+       depends on BT && INPUT
        select HID
        help
          HIDP (Human Interface Device Protocol) is a transport layer
index 24f144b72a96a87ee5f8fdc2016f613f00615344..4554e80d16a37b8ffdc0a7ac86805da2f2ee06e9 100644 (file)
@@ -1295,7 +1295,12 @@ static void security_timeout(struct work_struct *work)
        struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
                                                security_timer.work);
 
-       l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       BT_DBG("conn %p", conn);
+
+       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+               smp_chan_destroy(conn);
+               l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       }
 }
 
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -2910,12 +2915,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
 
-               switch (type) {
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-                       goto done;
-               }
+               if (type != L2CAP_CONF_RFC)
+                       continue;
+
+               if (olen != sizeof(rfc))
+                       break;
+
+               memcpy(&rfc, (void *)val, olen);
+               goto done;
        }
 
        /* Use sane default values in case a misbehaving remote device
index 25d22077607963d66a73cca2d644d4df468fa78f..3e5e3362ea00443b8f1954c1487bf47625b6288f 100644 (file)
@@ -1598,7 +1598,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
-       if (!conn) {
+       if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
                                 MGMT_STATUS_NOT_CONNECTED);
                goto failed;
@@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
                pairing_complete(cmd, mgmt_status(status));
 }
 
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status %u", status);
+
+       if (!status)
+               return;
+
+       cmd = find_pairing(conn);
+       if (!cmd)
+               BT_DBG("Unable to find a pending command");
+       else
+               pairing_complete(cmd, mgmt_status(status));
+}
+
 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
@@ -1934,6 +1950,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* For LE, just connecting isn't a proof that the pairing finished */
        if (cp->addr.type == BDADDR_BREDR)
                conn->connect_cfm_cb = pairing_complete_cb;
+       else
+               conn->connect_cfm_cb = le_connect_complete_cb;
 
        conn->security_cfm_cb = pairing_complete_cb;
        conn->disconn_cfm_cb = pairing_complete_cb;
index aa5d73b786aca23793f7691bbab7f86ceb2e96d1..d1820ff14aee46cfc55cd1c169495004c3130818 100644 (file)
@@ -710,9 +710,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
                        break;
                }
 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->wait, &wait);
index 6fc7c4708f3e1fa6336a434ef8c63d072e262d5a..37df4e9b3896435164adf36a0c52b8ab8c08030c 100644 (file)
@@ -648,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
        auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
 
-       ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+       ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
        if (ret)
                return SMP_UNSPECIFIED;
 
@@ -703,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
        return 0;
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
@@ -712,6 +712,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
        if (!key)
                return 0;
 
+       if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+               return 0;
+
        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
                return 1;
 
@@ -732,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
-       if (smp_ltk_encrypt(conn))
+       if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                return 0;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
                return 1;
 
        if (hcon->link_mode & HCI_LM_MASTER)
-               if (smp_ltk_encrypt(conn))
+               if (smp_ltk_encrypt(conn, sec_level))
                        goto done;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
index 0a942fbccc9a64592d486199608e6527ebc8de8f..e1144e1617be38814ebb2fb497755cb10b646559 100644 (file)
@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
                return -ENOMEM;
 
        dev_net_set(dev, net);
+       dev->rtnl_link_ops = &br_link_ops;
 
        res = register_netdev(dev);
        if (res)
index 2080485515f1be56299f01a92b5c98e034636dde..fe41260fbf38b28bb121dcc2235a5d27b83e27b7 100644 (file)
@@ -208,7 +208,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind           = "bridge",
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
index 1a8ad4fb9a6ba9e9246011e9ea674778591fd55f..a768b2408edff64890dde477df5918c102630a5e 100644 (file)
@@ -549,6 +549,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 #endif
 
 /* br_netlink.c */
+extern struct rtnl_link_ops br_link_ops;
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
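
The bridge hunks above export br_link_ops and attach it to bridges created through br_add_bridge(), so devices created outside rtnl_newlink() can still be managed (for example deleted) via rtnetlink. Below is a sketch of the same pattern for a hypothetical device type; the names are made up, and registering the ops at module init is assumed.

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <net/rtnetlink.h>

static void demo_setup(struct net_device *dev)
{
        ether_setup(dev);               /* placeholder setup; a real driver does more */
}

struct rtnl_link_ops demo_link_ops __read_mostly = {
        .kind   = "demo",
        .setup  = demo_setup,
};

static int demo_create(struct net *net, const char *name)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(0, name, demo_setup);        /* 3.x three-argument form */
        if (!dev)
                return -ENOMEM;

        dev_net_set(dev, net);
        dev->rtnl_link_ops = &demo_link_ops;    /* lets rtnetlink ("ip link del") find it */

        err = register_netdev(dev);
        if (err)
                free_netdev(dev);
        return err;
}

/* rtnl_link_register(&demo_link_ops) would be called at module init,
 * as br_netlink_init() does for br_link_ops. */
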
index aa6f716524fd3c0aecbeb0b9393f84df2c8046de..8c83c175b03a9f2379c253bd1c9e160b72457320 100644 (file)
@@ -4,8 +4,7 @@
  * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
  * License terms: GNU General Public License (GPL) version 2
  *
- * Borrowed heavily from file: pn_dev.c. Thanks to
- *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  *  and Sakari Ailus <sakari.ailus@nokia.com>
  */
 
@@ -562,9 +561,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
+       unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
index fb8944355264689b4e61ba9f9402fe8cad902420..78f1cdad5b332b91401570aeccbc12af5bd33d1e 100644 (file)
@@ -220,6 +220,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
                                                cfsk_hold, cfsk_put);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
+               cf_sk->sk.sk_shutdown = 0;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;
 
index cde1b4a20f758fe8055f26a7f24b40f16767ae55..46cca3a91d198093b86ea4d5701ecd42ce9dbce7 100644 (file)
@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (err < 0)
                goto free_skb;
 
-       /* to be able to check the received tx sock reference in raw_rcv() */
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
-
        skb->dev = dev;
        skb->sk  = sk;
 
index 214c2bb43d6252056a7a77dcd2d8eb5877c719d1..925ca583c09c8eae2fbaebbd73603e9194132f97 100644 (file)
@@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
  */
 static int ceph_auth_none_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
-       struct ceph_authorizer **a,
-       void **buf, size_t *len,
-       void **reply_buf, size_t *reply_len)
+       struct ceph_auth_handshake *auth)
 {
        struct ceph_auth_none_info *ai = ac->private;
        struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer(
                dout("built authorizer len %d\n", au->buf_len);
        }
 
-       *a = (struct ceph_authorizer *)au;
-       *buf = au->buf;
-       *len = au->buf_len;
-       *reply_buf = au->reply_buf;
-       *reply_len = sizeof(au->reply_buf);
+       auth->authorizer = (struct ceph_authorizer *) au;
+       auth->authorizer_buf = au->buf;
+       auth->authorizer_buf_len = au->buf_len;
+       auth->authorizer_reply_buf = au->reply_buf;
+       auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
        return 0;
 
 bad2:
index 1587dc6010c6276fd7c6e997bb7af6fe08d426be..a16bf14eb027cd4e765320f98b0fb378b5a5e7c4 100644 (file)
@@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
 static int ceph_x_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
-       struct ceph_authorizer **a,
-       void **buf, size_t *len,
-       void **reply_buf, size_t *reply_len)
+       struct ceph_auth_handshake *auth)
 {
        struct ceph_x_authorizer *au;
        struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@ static int ceph_x_create_authorizer(
                return ret;
        }
 
-       *a = (struct ceph_authorizer *)au;
-       *buf = au->buf->vec.iov_base;
-       *len = au->buf->vec.iov_len;
-       *reply_buf = au->reply_buf;
-       *reply_len = sizeof(au->reply_buf);
+       auth->authorizer = (struct ceph_authorizer *) au;
+       auth->authorizer_buf = au->buf->vec.iov_base;
+       auth->authorizer_buf_len = au->buf->vec.iov_len;
+       auth->authorizer_reply_buf = au->reply_buf;
+       auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
        return 0;
 }
 
index a776f751edbf223220b63cdac9dadb38187bb2b6..ba4323bce0e92beff34d13dcdc45b63a1b14dce7 100644 (file)
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
        /* unmount */
        ceph_osdc_stop(&client->osdc);
 
-       /*
-        * make sure osd connections close out before destroying the
-        * auth module, which is needed to free those connections'
-        * ceph_authorizers.
-        */
-       ceph_msgr_flush();
-
        ceph_monc_stop(&client->monc);
 
        ceph_debugfs_client_cleanup(client);
index d6ebb13a18a4bc787eb249ad3e4b62ec1c174f97..089613234f032610c05f25a239c1d2053e768b45 100644 (file)
@@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg)
  * @b: bucket pointer
  * @p: item index in bucket
  */
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
 {
-       if (p >= b->size)
+       if ((__u32)p >= b->size)
                return 0;
 
        switch (b->alg) {
@@ -37,38 +37,13 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
        case CRUSH_BUCKET_LIST:
                return ((struct crush_bucket_list *)b)->item_weights[p];
        case CRUSH_BUCKET_TREE:
-               if (p & 1)
-                       return ((struct crush_bucket_tree *)b)->node_weights[p];
-               return 0;
+               return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
        case CRUSH_BUCKET_STRAW:
                return ((struct crush_bucket_straw *)b)->item_weights[p];
        }
        return 0;
 }
 
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
-       int i, b, c;
-
-       for (b = 0; b < map->max_buckets; b++) {
-               if (map->buckets[b] == NULL)
-                       continue;
-               for (i = 0; i < map->buckets[b]->size; i++) {
-                       c = map->buckets[b]->items[i];
-                       BUG_ON(c >= map->max_devices ||
-                              c < -map->max_buckets);
-                       if (c >= 0)
-                               map->device_parents[c] = map->buckets[b]->id;
-                       else
-                               map->bucket_parents[-1-c] = map->buckets[b]->id;
-               }
-       }
-}
-
 void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
 {
        kfree(b->h.perm);
@@ -87,6 +62,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b)
 
 void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
 {
+       kfree(b->h.perm);
+       kfree(b->h.items);
        kfree(b->node_weights);
        kfree(b);
 }
@@ -124,10 +101,9 @@ void crush_destroy_bucket(struct crush_bucket *b)
  */
 void crush_destroy(struct crush_map *map)
 {
-       int b;
-
        /* buckets */
        if (map->buckets) {
+               __s32 b;
                for (b = 0; b < map->max_buckets; b++) {
                        if (map->buckets[b] == NULL)
                                continue;
@@ -138,13 +114,12 @@ void crush_destroy(struct crush_map *map)
 
        /* rules */
        if (map->rules) {
+               __u32 b;
                for (b = 0; b < map->max_rules; b++)
                        kfree(map->rules[b]);
                kfree(map->rules);
        }
 
-       kfree(map->bucket_parents);
-       kfree(map->device_parents);
        kfree(map);
 }
 
index 363f8f7e6c3caa15fa03d0bcea1967b731f2ae1d..d7edc24333b84d5aab17da2d983878ff5044b2bb 100644 (file)
@@ -33,9 +33,9 @@
  * @type: storage ruleset type (user defined)
  * @size: output set size
  */
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
 {
-       int i;
+       __u32 i;
 
        for (i = 0; i < map->max_rules; i++) {
                if (map->rules[i] &&
@@ -73,7 +73,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
        unsigned int i, s;
 
        /* start a new permutation if @x has changed */
-       if (bucket->perm_x != x || bucket->perm_n == 0) {
+       if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
                dprintk("bucket %d new x=%d\n", bucket->id, x);
                bucket->perm_x = x;
 
@@ -153,8 +153,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket,
                        return bucket->h.items[i];
        }
 
-       BUG_ON(1);
-       return 0;
+       dprintk("bad list sums for bucket %d\n", bucket->h.id);
+       return bucket->h.items[0];
 }
 
 
@@ -220,7 +220,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket,
 static int bucket_straw_choose(struct crush_bucket_straw *bucket,
                               int x, int r)
 {
-       int i;
+       __u32 i;
        int high = 0;
        __u64 high_draw = 0;
        __u64 draw;
@@ -240,6 +240,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
 static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
 {
        dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+       BUG_ON(in->size == 0);
        switch (in->alg) {
        case CRUSH_BUCKET_UNIFORM:
                return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -254,7 +255,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
                return bucket_straw_choose((struct crush_bucket_straw *)in,
                                           x, r);
        default:
-               BUG_ON(1);
+               dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
                return in->items[0];
        }
 }
@@ -263,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
  * true if device is marked "out" (failed, fully offloaded)
  * of the cluster
  */
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
 {
        if (weight[item] >= 0x10000)
                return 0;
@@ -288,16 +289,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
  * @recurse_to_leaf: true if we want one device under each item of given type
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  */
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
                        struct crush_bucket *bucket,
-                       __u32 *weight,
+                       const __u32 *weight,
                        int x, int numrep, int type,
                        int *out, int outpos,
                        int firstn, int recurse_to_leaf,
                        int *out2)
 {
        int rep;
-       int ftotal, flocal;
+       unsigned int ftotal, flocal;
        int retry_descent, retry_bucket, skip_rep;
        struct crush_bucket *in = bucket;
        int r;
@@ -305,7 +306,7 @@ static int crush_choose(struct crush_map *map,
        int item = 0;
        int itemtype;
        int collide, reject;
-       const int orig_tries = 5; /* attempts before we fall back to search */
+       const unsigned int orig_tries = 5; /* attempts before we fall back to search */
 
        dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
                bucket->id, x, outpos, numrep);
@@ -326,7 +327,7 @@ static int crush_choose(struct crush_map *map,
                                r = rep;
                                if (in->alg == CRUSH_BUCKET_UNIFORM) {
                                        /* be careful */
-                                       if (firstn || numrep >= in->size)
+                                       if (firstn || (__u32)numrep >= in->size)
                                                /* r' = r + f_total */
                                                r += ftotal;
                                        else if (in->size % numrep == 0)
@@ -355,7 +356,11 @@ static int crush_choose(struct crush_map *map,
                                        item = bucket_perm_choose(in, x, r);
                                else
                                        item = crush_bucket_choose(in, x, r);
-                               BUG_ON(item >= map->max_devices);
+                               if (item >= map->max_devices) {
+                                       dprintk("   bad item %d\n", item);
+                                       skip_rep = 1;
+                                       break;
+                               }
 
                                /* desired type? */
                                if (item < 0)
@@ -366,8 +371,12 @@ static int crush_choose(struct crush_map *map,
 
                                /* keep going? */
                                if (itemtype != type) {
-                                       BUG_ON(item >= 0 ||
-                                              (-1-item) >= map->max_buckets);
+                                       if (item >= 0 ||
+                                           (-1-item) >= map->max_buckets) {
+                                               dprintk("   bad item type %d\n", type);
+                                               skip_rep = 1;
+                                               break;
+                                       }
                                        in = map->buckets[-1-item];
                                        retry_bucket = 1;
                                        continue;
@@ -416,7 +425,7 @@ reject:
                                        if (collide && flocal < 3)
                                                /* retry locally a few times */
                                                retry_bucket = 1;
-                                       else if (flocal < in->size + orig_tries)
+                                       else if (flocal <= in->size + orig_tries)
                                                /* exhaustive bucket search */
                                                retry_bucket = 1;
                                        else if (ftotal < 20)
@@ -426,7 +435,7 @@ reject:
                                                /* else give up */
                                                skip_rep = 1;
                                        dprintk("  reject %d  collide %d  "
-                                               "ftotal %d  flocal %d\n",
+                                               "ftotal %u  flocal %u\n",
                                                reject, collide, ftotal,
                                                flocal);
                                }
@@ -455,15 +464,12 @@ reject:
  * @x: hash input
  * @result: pointer to result vector
  * @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
  */
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
                  int ruleno, int x, int *result, int result_max,
-                 int force, __u32 *weight)
+                 const __u32 *weight)
 {
        int result_len;
-       int force_context[CRUSH_MAX_DEPTH];
-       int force_pos = -1;
        int a[CRUSH_MAX_SET];
        int b[CRUSH_MAX_SET];
        int c[CRUSH_MAX_SET];
@@ -474,66 +480,44 @@ int crush_do_rule(struct crush_map *map,
        int osize;
        int *tmp;
        struct crush_rule *rule;
-       int step;
+       __u32 step;
        int i, j;
        int numrep;
        int firstn;
 
-       BUG_ON(ruleno >= map->max_rules);
+       if ((__u32)ruleno >= map->max_rules) {
+               dprintk(" bad ruleno %d\n", ruleno);
+               return 0;
+       }
 
        rule = map->rules[ruleno];
        result_len = 0;
        w = a;
        o = b;
 
-       /*
-        * determine hierarchical context of force, if any.  note
-        * that this may or may not correspond to the specific types
-        * referenced by the crush rule.
-        */
-       if (force >= 0 &&
-           force < map->max_devices &&
-           map->device_parents[force] != 0 &&
-           !is_out(map, weight, force, x)) {
-               while (1) {
-                       force_context[++force_pos] = force;
-                       if (force >= 0)
-                               force = map->device_parents[force];
-                       else
-                               force = map->bucket_parents[-1-force];
-                       if (force == 0)
-                               break;
-               }
-       }
-
        for (step = 0; step < rule->len; step++) {
+               struct crush_rule_step *curstep = &rule->steps[step];
+
                firstn = 0;
-               switch (rule->steps[step].op) {
+               switch (curstep->op) {
                case CRUSH_RULE_TAKE:
-                       w[0] = rule->steps[step].arg1;
-
-                       /* find position in force_context/hierarchy */
-                       while (force_pos >= 0 &&
-                              force_context[force_pos] != w[0])
-                               force_pos--;
-                       /* and move past it */
-                       if (force_pos >= 0)
-                               force_pos--;
-
+                       w[0] = curstep->arg1;
                        wsize = 1;
                        break;
 
                case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
                case CRUSH_RULE_CHOOSE_FIRSTN:
                        firstn = 1;
+                       /* fall through */
                case CRUSH_RULE_CHOOSE_LEAF_INDEP:
                case CRUSH_RULE_CHOOSE_INDEP:
-                       BUG_ON(wsize == 0);
+                       if (wsize == 0)
+                               break;
 
                        recurse_to_leaf =
-                               rule->steps[step].op ==
+                               curstep->op ==
                                 CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
-                               rule->steps[step].op ==
+                               curstep->op ==
                                CRUSH_RULE_CHOOSE_LEAF_INDEP;
 
                        /* reset output */
@@ -545,32 +529,18 @@ int crush_do_rule(struct crush_map *map,
                                 * basically, numrep <= 0 means relative to
                                 * the provided result_max
                                 */
-                               numrep = rule->steps[step].arg1;
+                               numrep = curstep->arg1;
                                if (numrep <= 0) {
                                        numrep += result_max;
                                        if (numrep <= 0)
                                                continue;
                                }
                                j = 0;
-                               if (osize == 0 && force_pos >= 0) {
-                                       /* skip any intermediate types */
-                                       while (force_pos &&
-                                              force_context[force_pos] < 0 &&
-                                              rule->steps[step].arg2 !=
-                                              map->buckets[-1 -
-                                              force_context[force_pos]]->type)
-                                               force_pos--;
-                                       o[osize] = force_context[force_pos];
-                                       if (recurse_to_leaf)
-                                               c[osize] = force_context[0];
-                                       j++;
-                                       force_pos--;
-                               }
                                osize += crush_choose(map,
                                                      map->buckets[-1-w[i]],
                                                      weight,
                                                      x, numrep,
-                                                     rule->steps[step].arg2,
+                                                     curstep->arg2,
                                                      o+osize, j,
                                                      firstn,
                                                      recurse_to_leaf, c+osize);
@@ -597,7 +567,9 @@ int crush_do_rule(struct crush_map *map,
                        break;
 
                default:
-                       BUG_ON(1);
+                       dprintk(" unknown op %d at step %d\n",
+                               curstep->op, step);
+                       break;
                }
        }
        return result_len;
index 36fa6bf684981688ff95c22788acb8db721271de..10255e81be79d84c5428504c2a79be3180626f79 100644 (file)
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con)
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }
+#ifdef CONFIG_BLOCK
+       else
+               m->bio_iter = NULL;
+#endif
 
        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -653,54 +657,57 @@ static void prepare_write_keepalive(struct ceph_connection *con)
  * Connection negotiation.
  */
 
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+                                               int *auth_proto)
 {
-       void *auth_buf;
-       int auth_len = 0;
-       int auth_protocol = 0;
+       struct ceph_auth_handshake *auth;
+
+       if (!con->ops->get_authorizer) {
+               con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+               con->out_connect.authorizer_len = 0;
+
+               return NULL;
+       }
+
+       /* Can't hold the mutex while getting authorizer */
 
        mutex_unlock(&con->mutex);
-       if (con->ops->get_authorizer)
-               con->ops->get_authorizer(con, &auth_buf, &auth_len,
-                                        &auth_protocol, &con->auth_reply_buf,
-                                        &con->auth_reply_buf_len,
-                                        con->auth_retry);
+
+       auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
        mutex_lock(&con->mutex);
 
-       if (test_bit(CLOSED, &con->state) ||
-           test_bit(OPENING, &con->state))
-               return -EAGAIN;
+       if (IS_ERR(auth))
+               return auth;
+       if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+               return ERR_PTR(-EAGAIN);
 
-       con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
-       con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+       con->auth_reply_buf = auth->authorizer_reply_buf;
+       con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
 
-       if (auth_len)
-               ceph_con_out_kvec_add(con, auth_len, auth_buf);
 
-       return 0;
+       return auth;
 }
 
 /*
  * We connected to a peer and are saying hello.
  */
-static void prepare_write_banner(struct ceph_messenger *msgr,
-                                struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
 {
-       ceph_con_out_kvec_reset(con);
        ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
-       ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
-                                       &msgr->my_enc_addr);
+       ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+                                       &con->msgr->my_enc_addr);
 
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
 }
 
-static int prepare_write_connect(struct ceph_messenger *msgr,
-                                struct ceph_connection *con,
-                                int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
 {
        unsigned int global_seq = get_global_seq(con->msgr, 0);
        int proto;
+       int auth_proto;
+       struct ceph_auth_handshake *auth;
 
        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +726,32 @@ static int prepare_write_connect(struct ceph_messenger *msgr,
        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);
 
-       con->out_connect.features = cpu_to_le64(msgr->supported_features);
+       con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;
 
-       if (include_banner)
-               prepare_write_banner(msgr, con);
-       else
-               ceph_con_out_kvec_reset(con);
-       ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+       auth_proto = CEPH_AUTH_UNKNOWN;
+       auth = get_connect_authorizer(con, &auth_proto);
+       if (IS_ERR(auth))
+               return PTR_ERR(auth);
+
+       con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+       con->out_connect.authorizer_len = auth ?
+               cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+       ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+                                       &con->out_connect);
+       if (auth && auth->authorizer_buf_len)
+               ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+                                       auth->authorizer_buf);
 
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
 
-       return prepare_connect_authorizer(con);
+       return 0;
 }
 
 /*
@@ -992,11 +1008,10 @@ static int prepare_read_message(struct ceph_connection *con)
 
 
 static int read_partial(struct ceph_connection *con,
-                       int *to, int size, void *object)
+                       int end, int size, void *object)
 {
-       *to += size;
-       while (con->in_base_pos < *to) {
-               int left = *to - con->in_base_pos;
+       while (con->in_base_pos < end) {
+               int left = end - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
@@ -1012,37 +1027,52 @@ static int read_partial(struct ceph_connection *con,
  */
 static int read_partial_banner(struct ceph_connection *con)
 {
-       int ret, to = 0;
+       int size;
+       int end;
+       int ret;
 
        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
 
        /* peer's banner */
-       ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+       size = strlen(CEPH_BANNER);
+       end = size;
+       ret = read_partial(con, end, size, con->in_banner);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
-                          &con->actual_peer_addr);
+
+       size = sizeof (con->actual_peer_addr);
+       end += size;
+       ret = read_partial(con, end, size, &con->actual_peer_addr);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
-                          &con->peer_addr_for_me);
+
+       size = sizeof (con->peer_addr_for_me);
+       end += size;
+       ret = read_partial(con, end, size, &con->peer_addr_for_me);
        if (ret <= 0)
                goto out;
+
 out:
        return ret;
 }
 
 static int read_partial_connect(struct ceph_connection *con)
 {
-       int ret, to = 0;
+       int size;
+       int end;
+       int ret;
 
        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
 
-       ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+       size = sizeof (con->in_reply);
+       end = size;
+       ret = read_partial(con, end, size, &con->in_reply);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
-                          con->auth_reply_buf);
+
+       size = le32_to_cpu(con->in_reply.authorizer_len);
+       end += size;
+       ret = read_partial(con, end, size, con->auth_reply_buf);
        if (ret <= 0)
                goto out;
 
@@ -1377,7 +1407,8 @@ static int process_connect(struct ceph_connection *con)
                        return -1;
                }
                con->auth_retry = 1;
-               ret = prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
@@ -1392,12 +1423,15 @@ static int process_connect(struct ceph_connection *con)
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
-                    le32_to_cpu(con->in_connect.connect_seq));
+                    le32_to_cpu(con->in_reply.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
-               prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
 
                /* Tell ceph about it. */
@@ -1416,11 +1450,14 @@ static int process_connect(struct ceph_connection *con)
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
-               dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
+               dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
-                    le32_to_cpu(con->in_connect.connect_seq));
-               con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
-               prepare_write_connect(con->msgr, con, 0);
+                    le32_to_cpu(con->in_reply.connect_seq));
+               con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
                break;
 
@@ -1431,10 +1468,13 @@ static int process_connect(struct ceph_connection *con)
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
-                    le32_to_cpu(con->in_connect.global_seq));
+                    le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
-                              le32_to_cpu(con->in_connect.global_seq));
-               prepare_write_connect(con->msgr, con, 0);
+                              le32_to_cpu(con->in_reply.global_seq));
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
                break;
 
@@ -1491,10 +1531,10 @@ static int process_connect(struct ceph_connection *con)
  */
 static int read_partial_ack(struct ceph_connection *con)
 {
-       int to = 0;
+       int size = sizeof (con->in_temp_ack);
+       int end = size;
 
-       return read_partial(con, &to, sizeof(con->in_temp_ack),
-                           &con->in_temp_ack);
+       return read_partial(con, end, size, &con->in_temp_ack);
 }
 
 
@@ -1627,8 +1667,9 @@ static int read_partial_message_bio(struct ceph_connection *con,
 static int read_partial_message(struct ceph_connection *con)
 {
        struct ceph_msg *m = con->in_msg;
+       int size;
+       int end;
        int ret;
-       int to, left;
        unsigned int front_len, middle_len, data_len;
        bool do_datacrc = !con->msgr->nocrc;
        int skip;
@@ -1638,15 +1679,11 @@ static int read_partial_message(struct ceph_connection *con)
        dout("read_partial_message con %p msg %p\n", con, m);
 
        /* header */
-       while (con->in_base_pos < sizeof(con->in_hdr)) {
-               left = sizeof(con->in_hdr) - con->in_base_pos;
-               ret = ceph_tcp_recvmsg(con->sock,
-                                      (char *)&con->in_hdr + con->in_base_pos,
-                                      left);
-               if (ret <= 0)
-                       return ret;
-               con->in_base_pos += ret;
-       }
+       size = sizeof (con->in_hdr);
+       end = size;
+       ret = read_partial(con, end, size, &con->in_hdr);
+       if (ret <= 0)
+               return ret;
 
        crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
        if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1796,12 @@ static int read_partial_message(struct ceph_connection *con)
        }
 
        /* footer */
-       to = sizeof(m->hdr) + sizeof(m->footer);
-       while (con->in_base_pos < to) {
-               left = to - con->in_base_pos;
-               ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
-                                      (con->in_base_pos - sizeof(m->hdr)),
-                                      left);
-               if (ret <= 0)
-                       return ret;
-               con->in_base_pos += ret;
-       }
+       size = sizeof (m->footer);
+       end += size;
+       ret = read_partial(con, end, size, &m->footer);
+       if (ret <= 0)
+               return ret;
+
        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1868,6 @@ static void process_message(struct ceph_connection *con)
  */
 static int try_write(struct ceph_connection *con)
 {
-       struct ceph_messenger *msgr = con->msgr;
        int ret = 1;
 
        dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1878,11 @@ more:
 
        /* open the socket first? */
        if (con->sock == NULL) {
-               prepare_write_connect(msgr, con, 1);
+               ceph_con_out_kvec_reset(con);
+               prepare_write_banner(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       goto out;
                prepare_read_banner(con);
                set_bit(CONNECTING, &con->state);
                clear_bit(NEGOTIATING, &con->state);
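
The messenger hunks above replace the open-coded recvmsg loops with calls to the read_partial(con, end, size, object) helper, where size is the length of the object being filled and end is that object's cumulative end offset within con->in_base_pos, so consecutive sections (header, footer, and so on) simply keep adding their sizes to end. A minimal userspace model of that bookkeeping, with invented names and a byte-at-a-time "receive" standing in for ceph_tcp_recvmsg():

#include <stdio.h>

static int read_partial(int *pos, int end, int size, char *obj, const char *src)
{
        while (*pos < end) {
                int have = *pos + size - end;   /* bytes of obj already filled */

                obj[have] = src[*pos];          /* one byte per "recv" */
                (*pos)++;
        }
        return 1;                               /* this section is complete */
}

int main(void)
{
        const char *stream = "HDRFOOT";         /* pretend wire data */
        char hdr[3], foot[4];
        int pos = 0, end = 0;                   /* pos plays con->in_base_pos */

        end += sizeof(hdr);
        read_partial(&pos, end, sizeof(hdr), hdr, stream);

        end += sizeof(foot);                    /* end accumulates, as in the patch */
        read_partial(&pos, end, sizeof(foot), foot, stream);

        printf("%.3s %.4s\n", hdr, foot);       /* prints: HDR FOOT */
        return 0;
}
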
index 10d6008d31f21f982fa929a023a4f0a5a1b02d2a..d0649a9655be3b7bbf1baaa03220548c74156ff0 100644 (file)
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
        mutex_unlock(&monc->mutex);
 
+       /*
+        * flush msgr queue before we destroy ourselves to ensure that:
+        *  - any work that references our embedded con is finished.
+        *  - any osd_client or other work that may reference an authorizer
+        *    finishes before we shut down the auth subsystem.
+        */
+       ceph_msgr_flush();
+
        ceph_auth_destroy(monc->auth);
 
        ceph_msg_put(monc->m_auth);
index 1b0ef3c4d393c5221d30c15eb24b935ee292e3fc..ca59e66c9787303805519f2bf325cc5d5817ff55 100644 (file)
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref)
 
        if (req->r_request)
                ceph_msg_put(req->r_request);
-       if (req->r_reply)
-               ceph_msg_put(req->r_reply);
        if (req->r_con_filling_msg) {
                dout("release_request revoking pages %p from con %p\n",
                     req->r_pages, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg,
                                      req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
        }
+       if (req->r_reply)
+               ceph_msg_put(req->r_reply);
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
@@ -278,7 +278,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
 {
        dst->op = cpu_to_le16(src->op);
 
-       switch (dst->op) {
+       switch (src->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
                dst->extent.offset =
@@ -664,11 +664,11 @@ static void put_osd(struct ceph_osd *osd)
 {
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
-       if (atomic_dec_and_test(&osd->o_ref)) {
+       if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
 
-               if (osd->o_authorizer)
-                       ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
 }
@@ -841,6 +841,12 @@ static void register_request(struct ceph_osd_client *osdc,
 static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
 {
+       if (RB_EMPTY_NODE(&req->r_node)) {
+               dout("__unregister_request %p tid %lld not registered\n",
+                       req, req->r_tid);
+               return;
+       }
+
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        osdc->num_requests--;
@@ -1210,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (req->r_con_filling_msg == con && req->r_reply == msg) {
                dout(" dropping con_filling_msg ref %p\n", con);
                req->r_con_filling_msg = NULL;
-               ceph_con_put(con);
+               con->ops->put(con);
        }
 
        if (!req->r_got_reply) {
@@ -2022,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                dout("get_reply revoking msg %p from old con %p\n",
                     req->r_reply, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
                req->r_con_filling_msg = NULL;
        }
 
@@ -2057,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 #endif
        }
        *skip = 0;
-       req->r_con_filling_msg = ceph_con_get(con);
+       req->r_con_filling_msg = con->ops->get(con);
        dout("get_reply tid %lld %p\n", tid, m);
 
 out:
@@ -2108,37 +2114,32 @@ static void put_osd_con(struct ceph_connection *con)
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-                         void **buf, int *len, int *proto,
-                         void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+                                       int *proto, int force_new)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
-       int ret = 0;
+       struct ceph_auth_handshake *auth = &o->o_auth;
 
-       if (force_new && o->o_authorizer) {
-               ac->ops->destroy_authorizer(ac, o->o_authorizer);
-               o->o_authorizer = NULL;
-       }
-       if (o->o_authorizer == NULL) {
-               ret = ac->ops->create_authorizer(
-                       ac, CEPH_ENTITY_TYPE_OSD,
-                       &o->o_authorizer,
-                       &o->o_authorizer_buf,
-                       &o->o_authorizer_buf_len,
-                       &o->o_authorizer_reply_buf,
-                       &o->o_authorizer_reply_buf_len);
+       if (force_new && auth->authorizer) {
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, auth->authorizer);
+               auth->authorizer = NULL;
+       }
+       if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+               int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+                                                       auth);
                if (ret)
-                       return ret;
+                       return ERR_PTR(ret);
        }
-
        *proto = ac->protocol;
-       *buf = o->o_authorizer_buf;
-       *len = o->o_authorizer_buf_len;
-       *reply_buf = o->o_authorizer_reply_buf;
-       *reply_len = o->o_authorizer_reply_buf_len;
-       return 0;
+
+       return auth;
 }
 
 
@@ -2148,7 +2149,11 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-       return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+       /*
+        * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+        * XXX which do we do:  succeed or fail?
+        */
+       return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-       if (ac->ops->invalidate_authorizer)
+       if (ac->ops && ac->ops->invalidate_authorizer)
                ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
 
        return ceph_monc_validate_auth(&osdc->client->monc);
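
The osd_client changes above fold the per-OSD authorizer buffers into a single ceph_auth_handshake owned by the ceph_osd, and they guard every dereference of ac->ops, since the auth method table (and individual methods) may legitimately be absent. A compilable userspace sketch of that "optional ops table" pattern; the struct and function names below are invented for illustration, not libceph's:

#include <stdio.h>

struct auth_ops {
        void (*destroy_authorizer)(void *authorizer);
};

struct auth_client {
        const struct auth_ops *ops;     /* may be NULL, e.g. "auth none" */
};

static void destroy_authorizer(struct auth_client *ac, void *authorizer)
{
        /* same guard shape as the hunks above: a missing op means no-op */
        if (ac->ops && ac->ops->destroy_authorizer)
                ac->ops->destroy_authorizer(authorizer);
}

static void noisy_destroy(void *authorizer)
{
        printf("destroyed %p\n", authorizer);
}

int main(void)
{
        static const struct auth_ops ops = { .destroy_authorizer = noisy_destroy };
        struct auth_client with_ops = { .ops = &ops };
        struct auth_client no_ops   = { .ops = NULL };
        int token;

        destroy_authorizer(&with_ops, &token);  /* calls through the table */
        destroy_authorizer(&no_ops, &token);    /* silently does nothing */
        return 0;
}
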
index 56e561a690044ee88b7fe22d4ed51c09910c8e99..81e3b84a77efdecb6c44603e7784a083fe94b980 100644 (file)
@@ -161,13 +161,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);
 
-       c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
-       if (c->device_parents == NULL)
-               goto badmem;
-       c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
-       if (c->bucket_parents == NULL)
-               goto badmem;
-
        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
@@ -890,8 +883,12 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                pglen = ceph_decode_32(p);
 
                if (pglen) {
-                       /* insert */
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+                       /* removing existing (if any) */
+                       (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+                       /* insert */
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        if (!pg) {
                                err = -ENOMEM;
@@ -1000,7 +997,6 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
 {
        unsigned int num, num_mask;
        struct ceph_pg pgid;
-       s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;
        unsigned int ps;
@@ -1011,23 +1007,13 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
        if (!pool)
                return -EIO;
        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
-       if (preferred >= 0) {
-               ps += preferred;
-               num = le32_to_cpu(pool->v.lpg_num);
-               num_mask = pool->lpg_num_mask;
-       } else {
-               num = le32_to_cpu(pool->v.pg_num);
-               num_mask = pool->pg_num_mask;
-       }
+       num = le32_to_cpu(pool->v.pg_num);
+       num_mask = pool->pg_num_mask;
 
        pgid.ps = cpu_to_le16(ps);
-       pgid.preferred = cpu_to_le16(preferred);
+       pgid.preferred = cpu_to_le16(-1);
        pgid.pool = fl->fl_pg_pool;
-       if (preferred >= 0)
-               dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
-                    (int)preferred);
-       else
-               dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+       dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
 
        ol->ol_pgid = pgid;
        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
-       unsigned int poolid, ps, pps, t;
-       int preferred;
+       unsigned int poolid, ps, pps, t, r;
 
        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);
-       preferred = (s16)le16_to_cpu(pgid.preferred);
 
        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return NULL;
 
        /* pg_temp? */
-       if (preferred >= 0)
-               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
-                                   pool->lpgp_num_mask);
-       else
-               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
-                                   pool->pgp_num_mask);
+       t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+                           pool->pgp_num_mask);
        pgid.ps = cpu_to_le16(t);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
@@ -1080,23 +1060,20 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                return NULL;
        }
 
-       /* don't forcefeed bad device ids to crush */
-       if (preferred >= osdmap->max_osd ||
-           preferred >= osdmap->crush->max_devices)
-               preferred = -1;
-
-       if (preferred >= 0)
-               pps = ceph_stable_mod(ps,
-                                     le32_to_cpu(pool->v.lpgp_num),
-                                     pool->lpgp_num_mask);
-       else
-               pps = ceph_stable_mod(ps,
-                                     le32_to_cpu(pool->v.pgp_num),
-                                     pool->pgp_num_mask);
+       pps = ceph_stable_mod(ps,
+                             le32_to_cpu(pool->v.pgp_num),
+                             pool->pgp_num_mask);
        pps += poolid;
-       *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
-                            min_t(int, pool->v.size, *num),
-                            preferred, osdmap->osd_weight);
+       r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+                         min_t(int, pool->v.size, *num),
+                         osdmap->osd_weight);
+       if (r < 0) {
+               pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+                      " size %d\n", r, poolid, pool->v.crush_ruleset,
+                      pool->v.type, pool->v.size);
+               return NULL;
+       }
+       *num = r;
        return osds;
 }
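
With the hunks above, placement no longer special-cases "preferred" (localized) placement groups: the seed is always ceph_stable_mod(ps, pg_num, pgp_num_mask), and the value returned by crush_do_rule() is now checked before being used as an OSD count. The stable-mod helper maps a hash into [0, b) while staying stable as b grows toward the next power of two; the function below mirrors its shape for illustration (treat it as a sketch rather than the authoritative libceph definition), and the demo shows the top "missing" slots folding into the lower half:

#include <stdio.h>

/* mirrors ceph_stable_mod(): bmask is the next power-of-two-minus-one >= b */
static int stable_mod(int x, int b, int bmask)
{
        if ((x & bmask) < b)
                return x & bmask;
        else
                return x & (bmask >> 1);
}

int main(void)
{
        int x;

        /* a pool with pg_num = 12 uses bmask = 15; hashes that would land
         * in the nonexistent slots 12..15 fold back into 4..7 */
        for (x = 10; x < 16; x++)
                printf("stable_mod(%d, 12, 15) = %d\n", x, stable_mod(x, 12, 15));
        return 0;
}
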
 
index cd0981977f5c92ee82cb42651ebfcbfb64c9c82f..1cb0d8a6aa6c5cd3d23741cd5b941a23805b08bd 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
@@ -2089,25 +2089,6 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
        return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       if (sk && !skb_shinfo(skb)->tx_flags) {
-               /* skb_tx_hash() wont be able to get sk.
-                * We copy sk_hash into skb->rxhash
-                */
-               if (!skb->rxhash)
-                       skb->rxhash = sk->sk_hash;
-               skb_orphan(skb);
-       }
-}
-
 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
@@ -2193,8 +2174,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
-               skb_orphan_try(skb);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2304,7 +2283,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol ^ skb->rxhash;
+               hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
@@ -2465,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-       if ((!skb->priority) && (skb->sk) && map)
-               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+       if (!skb->priority && skb->sk && map) {
+               unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+               if (prioidx < map->priomap_len)
+                       skb->priority = map->priomap[prioidx];
+       }
 }
 #else
 #define skb_update_prio(skb)
@@ -6300,7 +6283,8 @@ static struct hlist_head *netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
-       INIT_LIST_HEAD(&net->dev_base_head);
+       if (net != &init_net)
+               INIT_LIST_HEAD(&net->dev_base_head);
 
        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
index 3252e7e0a0055ad07fa1c02979500650703f6820..d23b6682f4e95cfd029cd19db31252184ec03d2d 100644 (file)
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-       struct work_struct dm_alert_work;
-       struct sk_buff __rcu *skb;
-       atomic_t dm_hit_count;
-       struct timer_list send_timer;
-       int cpu;
+       spinlock_t              lock;
+       struct sk_buff          *skb;
+       struct work_struct      dm_alert_work;
+       struct timer_list       send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
        struct sk_buff *skb;
-       struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+       unsigned long flags;
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
                                  sizeof(struct net_dm_alert_msg));
                msg = nla_data(nla);
                memset(msg, 0, al);
-       } else
-               schedule_work_on(data->cpu, &data->dm_alert_work);
-
-       /*
-        * Don't need to lock this, since we are guaranteed to only
-        * run this on a single cpu at a time.
-        * Note also that we only update data->skb if the old and new skb
-        * pointers don't match.  This ensures that we don't continually call
-        * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-        */
-       if (skb != oskb) {
-               rcu_assign_pointer(data->skb, skb);
-
-               synchronize_rcu();
-
-               atomic_set(&data->dm_hit_count, dm_hit_limit);
+       } else {
+               mod_timer(&data->send_timer, jiffies + HZ / 10);
        }
 
+       spin_lock_irqsave(&data->lock, flags);
+       swap(data->skb, skb);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data;
 
-       WARN_ON_ONCE(data->cpu != smp_processor_id());
+       data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-       /*
-        * Grab the skb we're about to send
-        */
-       skb = rcu_dereference_protected(data->skb, 1);
-
-       /*
-        * Replace it with a new one
-        */
-       reset_per_cpu_data(data);
+       skb = reset_per_cpu_data(data);
 
-       /*
-        * Ship it!
-        */
        if (skb)
                genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-       put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-       struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
-
-       schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-       put_cpu_var(dm_cpu_data);
+       schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        struct nlattr *nla;
        int i;
        struct sk_buff *dskb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
+       struct per_cpu_dm_data *data;
+       unsigned long flags;
 
-       rcu_read_lock();
-       dskb = rcu_dereference(data->skb);
+       local_irq_save(flags);
+       data = &__get_cpu_var(dm_cpu_data);
+       spin_lock(&data->lock);
+       dskb = data->skb;
 
        if (!dskb)
                goto out;
 
-       if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-               /*
-                * we're already at zero, discard this hit
-                */
-               goto out;
-       }
-
        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        for (i = 0; i < msg->entries; i++) {
                if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
                        msg->points[i].count++;
-                       atomic_inc(&data->dm_hit_count);
                        goto out;
                }
        }
-
+       if (msg->entries == dm_hit_limit)
+               goto out;
        /*
         * We need to create a new entry
         */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
        if (!timer_pending(&data->send_timer)) {
                data->send_timer.expires = jiffies + dm_delay * HZ;
-               add_timer_on(&data->send_timer, smp_processor_id());
+               add_timer(&data->send_timer);
        }
 
 out:
-       rcu_read_unlock();
-       put_cpu_var(dm_cpu_data);
-       return;
+       spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
 
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
-               data->cpu = cpu;
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
                init_timer(&data->send_timer);
-               data->send_timer.data = cpu;
+               data->send_timer.data = (unsigned long)data;
                data->send_timer.function = sched_send_work;
+               spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
 
@@ -468,3 +432,4 @@ module_exit(exit_net_drop_monitor);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
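
The drop_monitor rework above drops the RCU pointer and atomic hit counter in favour of a per-CPU spinlock: trace_drop_common() bumps counters in the pending skb under the lock, while reset_per_cpu_data() swaps in a fresh buffer under the same lock and returns the old one so send_dm_alert() can multicast it outside the lock. A small pthreads sketch of that swap-under-lock shape; the names and the plain integer "buffer" below are stand-ins, not the kernel structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dm_data {
        pthread_spinlock_t lock;
        int *counts;                    /* stand-in for data->skb */
};

/* cheap bookkeeping under the lock, as in trace_drop_common() */
static void record_hit(struct dm_data *d, int point)
{
        pthread_spin_lock(&d->lock);
        if (d->counts)
                d->counts[point]++;
        pthread_spin_unlock(&d->lock);
}

/* install a fresh buffer, hand back the old one, as in reset_per_cpu_data() */
static int *reset_data(struct dm_data *d)
{
        int *fresh = calloc(8, sizeof(int));
        int *old;

        pthread_spin_lock(&d->lock);
        old = d->counts;                /* swap(data->skb, skb) in the patch */
        d->counts = fresh;
        pthread_spin_unlock(&d->lock);

        return old;                     /* "sent" and freed outside the lock */
}

int main(void)
{
        struct dm_data d = { .counts = NULL };
        int *batch;

        pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
        free(reset_data(&d));           /* installs the first buffer */

        record_hit(&d, 3);
        record_hit(&d, 3);

        batch = reset_data(&d);
        if (batch)
                printf("drop point 3 hit %d times\n", batch[3]);
        free(batch);
        free(d.counts);
        pthread_spin_destroy(&d.lock);
        return 0;
}
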
index a3eddb515d1b282dc9dd8c597e09d8476de7916d..d4ce2dc712e34b7b1cb974c5e938313f58e9a8aa 100644 (file)
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 /**
  *     sk_unattached_filter_create - create an unattached filter
  *     @fprog: the filter program
- *     @sk: the socket to use
+ *     @pfp: the unattached filter that is created
  *
- * Create a filter independent ofr any socket. We first run some
+ * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
index eb09f8bbbf075bcc10f3335198dc8e097c2f9316..d81d026138f0810471ce4cf2540c0ec4229d853f 100644 (file)
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
 
-       for (h = 0; h < (1 << nht->hash_shift); h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h < (1 << nht->hash_shift); h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 
        read_lock_bh(&tbl->lock);
 
-       for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
        struct neigh_table *tbl;
        int t, family, s_t;
        int proxy = 0;
-       int err = 0;
+       int err;
 
        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
        s_t = cb->args[0];
 
-       for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+       for (tbl = neigh_tables, t = 0; tbl;
             tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                        err = pneigh_dump_table(tbl, skb, cb);
                else
                        err = neigh_dump_table(tbl, skb, cb);
+               if (err < 0)
+                       break;
        }
        read_unlock(&neigh_tbl_lock);
 
index dddbacb8f28ccba180cd20855476d5d7e351b1b8..42f1e1c7514f67ad56df06ffe53e6c94a094681a 100644 (file)
@@ -27,7 +27,9 @@ static DEFINE_MUTEX(net_mutex);
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
-struct net init_net;
+struct net init_net = {
+       .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
 EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */
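
Together with the netdev_init() change earlier in this diff, the static initializer above guarantees that init_net.dev_base_head is a valid (empty) list from the very start of boot, before any pernet init code has run, and netdev_init() correspondingly skips re-initializing it for init_net. A userspace re-derivation of that self-referential static list initializer (list_head and LIST_HEAD_INIT are restated here just for the demo):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct net { struct list_head dev_base_head; };

/* mirrors: struct net init_net = { .dev_base_head = LIST_HEAD_INIT(...) }; */
static struct net init_net = {
        .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};

int main(void)
{
        /* usable before any runtime INIT_LIST_HEAD() has run */
        printf("init_net list empty: %d\n",
               init_net.dev_base_head.next == &init_net.dev_base_head);
        return 0;
}
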
index 3d84fb9d88739629b32c77f1a1d77c7f55ad7630..f9f40b932e4b855fc1a4dc3b3c74620efdc4f970 100644 (file)
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-       int total_len, eth_len, ip_len, udp_len;
+       int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
 
        udp_len = len + sizeof(*udph);
-       ip_len = eth_len = udp_len + sizeof(*iph);
-       total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+       ip_len = udp_len + sizeof(*iph);
+       total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-       skb = find_skb(np, total_len, total_len - len);
+       skb = find_skb(np, total_len + np->dev->needed_tailroom,
+                      total_len - len);
        if (!skb)
                return;
 
        skb_copy_to_linear_data(skb, msg, len);
-       skb->len += len;
+       skb_put(skb, len);
 
        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
index 5b8aa2fae48b84e3255ecdfe83535457bd170901..b2e9caa1ad1aa761ba98886a23ce3823bdc179ee 100644 (file)
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
                return -ENOSPC;
        }
        set_bit(prioidx, prioidx_map);
+       if (atomic_read(&max_prioidx) < prioidx)
+               atomic_set(&max_prioidx, prioidx);
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
-       atomic_set(&max_prioidx, prioidx);
        *prio = prioidx;
        return 0;
 }
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
 }
 
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
 {
        size_t new_size = sizeof(struct netprio_map) +
                           ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 
        if (!new_priomap) {
                pr_warn("Unable to alloc new priomap!\n");
-               return;
+               return -ENOMEM;
        }
 
        for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
        rcu_assign_pointer(dev->priomap, new_priomap);
        if (old_priomap)
                kfree_rcu(old_priomap, rcu);
+       return 0;
 }
 
-static void update_netdev_tables(void)
+static int write_update_netdev_table(struct net_device *dev)
 {
+       int ret = 0;
+       u32 max_len;
+       struct netprio_map *map;
+
+       rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
+       map = rtnl_dereference(dev->priomap);
+       if (!map || map->priomap_len < max_len)
+               ret = extend_netdev_table(dev, max_len);
+       rtnl_unlock();
+
+       return ret;
+}
+
+static int update_netdev_tables(void)
+{
+       int ret = 0;
        struct net_device *dev;
-       u32 max_len = atomic_read(&max_prioidx) + 1;
+       u32 max_len;
        struct netprio_map *map;
 
        rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if ((!map) ||
-                   (map->priomap_len < max_len))
-                       extend_netdev_table(dev, max_len);
+               /*
+                * don't allocate priomap if we didn't
+                * change net_prio.ifpriomap (map == NULL),
+                * this will speed up skb_update_prio.
+                */
+               if (map && map->priomap_len < max_len) {
+                       ret = extend_netdev_table(dev, max_len);
+                       if (ret < 0)
+                               break;
+               }
        }
        rtnl_unlock();
+       return ret;
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
        struct cgroup_netprio_state *cs;
-       int ret;
+       int ret = -EINVAL;
 
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);
 
-       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
-               kfree(cs);
-               return ERR_PTR(-EINVAL);
-       }
+       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+               goto out;
 
        ret = get_prioidx(&cs->prioidx);
-       if (ret != 0) {
+       if (ret < 0) {
                pr_warn("No space in priority index array\n");
-               kfree(cs);
-               return ERR_PTR(ret);
+               goto out;
+       }
+
+       ret = update_netdev_tables();
+       if (ret < 0) {
+               put_prioidx(cs->prioidx);
+               goto out;
        }
 
        return &cs->css;
+out:
+       kfree(cs);
+       return ERR_PTR(ret);
 }
 
 static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
        rtnl_lock();
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if (map)
+               if (map && cs->prioidx < map->priomap_len)
                        map->priomap[cs->prioidx] = 0;
        }
        rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                map = rcu_dereference(dev->priomap);
-               priority = map ? map->priomap[prioidx] : 0;
+               priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
                cb->fill(cb, dev->name, priority);
        }
        rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
        if (!dev)
                goto out_free_devname;
 
-       update_netdev_tables();
-       ret = 0;
+       ret = write_update_netdev_table(dev);
+       if (ret < 0)
+               goto out_put_dev;
+
        rcu_read_lock();
        map = rcu_dereference(dev->priomap);
        if (map)
                map->priomap[prioidx] = priority;
        rcu_read_unlock();
+
+out_put_dev:
        dev_put(dev);
 
 out_free_devname:
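
extend_netdev_table() above now reports allocation failure, and its callers unwind (put_prioidx() in cgrp_create(), the new out_put_dev label in write_priomap()) instead of carrying on with a priomap shorter than the priority index about to be written. The map itself is a length-prefixed flexible array that is grown by allocating a larger copy; a userspace sketch of that grow-and-copy step (the kernel publishes the new map with rcu_assign_pointer() and retires the old one with kfree_rcu(), which the plain free() below does not model):

#include <stdio.h>
#include <stdlib.h>

struct prio_map {
        unsigned int len;
        unsigned int prio[];            /* flexible array member */
};

static int extend_table(struct prio_map **mapp, unsigned int new_len)
{
        struct prio_map *old = *mapp, *grown;
        unsigned int i;

        grown = calloc(1, sizeof(*grown) + new_len * sizeof(grown->prio[0]));
        if (!grown)
                return -1;              /* the kernel returns -ENOMEM */

        for (i = 0; old && i < old->len; i++)
                grown->prio[i] = old->prio[i];
        grown->len = new_len;

        *mapp = grown;                  /* rcu_assign_pointer() in the kernel */
        free(old);                      /* kfree_rcu() in the kernel */
        return 0;
}

int main(void)
{
        struct prio_map *map = NULL;

        if (extend_table(&map, 4) == 0) {
                map->prio[3] = 7;
                printf("len=%u prio[3]=%u\n", map->len, map->prio[3]);
        }
        free(map);
        return 0;
}
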
index 016694d624843c8ca1df3013639ffd4f6ae75f39..d124306b81fdbf73171d0921e401321f44f63711 100644 (file)
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+       if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data = netdev_alloc_frag(fragsz);
 
                if (likely(data)) {
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
@@ -3361,7 +3362,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
  * @to: prior buffer
  * @from: buffer to add
  * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
  */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                      bool *fragstolen, int *delta_truesize)
index 653f8c0aedc54aafb08c6f451157f7ca7e432efa..9e5b71fda6ec0d726bd356bce1658f55091ebcf8 100644 (file)
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
        gfp_t gfp_mask;
        long timeo;
        int err;
+       int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+       err = -EMSGSIZE;
+       if (npages > MAX_SKB_FRAGS)
+               goto failure;
 
        gfp_mask = sk->sk_allocation;
        if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                        skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
-                               int npages;
                                int i;
 
                                /* No pages, we're done... */
                                if (!data_len)
                                        break;
 
-                               npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                                skb->truesize += data_len;
                                skb_shinfo(skb)->nr_frags = npages;
                                for (i = 0; i < npages; i++) {
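
sock_alloc_send_pskb() above now derives the page count from data_len once, before any allocation, and bails out with -EMSGSIZE when the payload would need more page fragments than an skb can carry, instead of overrunning the frag array inside the allocation loop. The arithmetic, assuming the usual 4 KiB pages and a MAX_SKB_FRAGS of 17 (both values are assumptions of this demo, not taken from the diff):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define MAX_SKB_FRAGS   17UL            /* assumed: 65536/PAGE_SIZE + 1 */

int main(void)
{
        unsigned long data_len = 80000; /* hypothetical request */
        unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        printf("data_len=%lu -> %lu pages -> %s\n", data_len, npages,
               npages > MAX_SKB_FRAGS ? "-EMSGSIZE" : "ok");
        return 0;
}
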
index 6fbb2ad7bb6df480a71612054003a6176fc12447..16705611589ab6abd1a2e67811d0e38d6d8b4d26 100644 (file)
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        mtu = dev->mtu;
        pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+       if (size > mtu) {
+               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+               err = -EINVAL;
+               goto out_dev;
+       }
+
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (err < 0)
                goto out_skb;
 
-       if (size > mtu) {
-               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
-               goto out_skb;
-       }
-
        skb->dev = dev;
        skb->sk  = sk;
        skb->protocol = htons(ETH_P_IEEE802154);
index c48adc565e9239a45c600d791c8f97059e22696f..667c1d4ca9847c627127dc633e19c7c94f354188 100644 (file)
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                case CIPSO_V4_TAG_LOCAL:
                        /* This is a non-standard tag that we only allow for
                         * local connections, so if the incoming interface is
-                        * not the loopback device drop the packet. */
-                       if (!(skb->dev->flags & IFF_LOOPBACK)) {
+                        * not the loopback device drop the packet. Further,
+                        * there is no legitimate reason for setting this from
+                        * userspace so reject it if skb is NULL. */
+                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
index 89a47b35905dcc6e1c3bb94b0db7c6a32a61e8fc..cb982a61536fade811908a18e6119f513914741e 100644 (file)
@@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
-
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
+       unsigned int net_adj;
 
        switch (x->props.mode) {
-       case XFRM_MODE_TUNNEL:
-               break;
-       default:
        case XFRM_MODE_TRANSPORT:
-               /* The worst case */
-               mtu -= blksize - 4;
-               mtu += min_t(u32, blksize - 4, rem);
-               break;
        case XFRM_MODE_BEET:
-               /* The worst case. */
-               mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+               net_adj = sizeof(struct iphdr);
                break;
+       case XFRM_MODE_TUNNEL:
+               net_adj = 0;
+               break;
+       default:
+               BUG();
        }
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
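
The rewritten esp4_get_mtu() above collapses the old "subtract, keep the remainder, adjust per mode" logic into one expression: take the link MTU, strip the ESP header and ICV plus a per-mode network-header adjustment (an IPv4 header in transport and BEET modes, nothing in tunnel mode), round down to the cipher alignment, then add back net_adj - 2, the 2 accounting for the ESP trailer's pad-length and next-header bytes. A throwaway check of the rounding; every number here is hypothetical:

#include <stdio.h>

/* same shape as the new esp4_get_mtu() above */
static unsigned int esp_mtu(unsigned int mtu, unsigned int overhead,
                            unsigned int align, unsigned int net_adj)
{
        return ((mtu - overhead - net_adj) & ~(align - 1)) + (net_adj - 2);
}

int main(void)
{
        /* 1500-byte link MTU, 56 bytes of ESP header + ICV, 16-byte cipher
         * blocks, transport mode over IPv4 (net_adj = 20) */
        printf("%u\n", esp_mtu(1500, 56, 16, 20));      /* prints 1442 */
        return 0;
}
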
index 95e61596e605db31b3212328781fcbd7ea2f8dae..f9ee7417f6a024b9357e84335c04b2cec80ae1e9 100644 (file)
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-                          sk->sk_protocol, inet_sk_flowi_flags(sk),
+                          sk->sk_protocol,
+                          inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
index d4d61b694fab9bc497b1cccb808a3a568ad30cc2..dfba343b25092de39c9a5d1eea5d43226690e984 100644 (file)
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+       struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+       spin_lock_bh(&gc_lock);
+       list_add_tail(&p->gc_list, &gc_list);
+       spin_unlock_bh(&gc_lock);
+
+       schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
        struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
        prev = cmpxchg(&base->root, old, new);
        if (prev == old) {
                base->total = 0;
-               spin_lock(&gc_lock);
-               list_add_tail(&prev->gc_list, &gc_list);
-               spin_unlock(&gc_lock);
-               schedule_delayed_work(&gc_work, gc_delay);
+               call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
        }
 
 out:
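
inetpeer_invalidate_tree() above stops putting the detached tree straight onto the gc list; it now defers that work to an RCU callback via call_rcu(), and inetpeer_inval_rcu() recovers its inet_peer from the rcu_head with container_of() once the grace period has elapsed. A userspace re-derivation of that container_of() step, with an invented struct layout and a direct call standing in for the grace-period wait:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *); };

struct peer {
        int id;
        struct rcu_head gc_rcu;         /* embedded, as in struct inet_peer */
};

static void inval_rcu(struct rcu_head *head)
{
        struct peer *p = container_of(head, struct peer, gc_rcu);

        printf("callback sees peer %d\n", p->id);
}

int main(void)
{
        struct peer p = { .id = 42 };

        /* in the kernel this runs after a grace period, via call_rcu() */
        inval_rcu(&p.gc_rcu);
        return 0;
}
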
index e5c44fc586abe7157f8b75b8f164a7222ae8548c..ab09b126423ce3e56fd1fee2f6bda54e4b851022 100644 (file)
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index a9e519ad6db53d544c73d145724602cde1e3f48e..c94bbc6f2ba331bb9e261692151f55f78277258b 100644 (file)
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
        struct ip_options *opt = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index a43b87dfe800c043c7fcc65af316b4d193ab3455..c8d28c433b2b0dc958f7bdebaa77f2b899dfd22e 100644 (file)
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             struct request_values *rvp)
+                             struct request_values *rvp,
+                             u16 queue_mapping)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-       return tcp_v4_send_synack(sk, NULL, req, rvp);
+       return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
        if (tcp_v4_send_synack(sk, dst, req,
-                              (struct request_values *)&tmp_ext) ||
+                              (struct request_values *)&tmp_ext,
+                              skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;
 
index 1e62b7557b00e1e0897f480d9391d018dfd01dcb..db1521fcda5b3fd182a3068c9b86d5161e5e4d56 100644 (file)
@@ -413,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
+       unsigned int net_adj;
 
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
-
-       if (x->props.mode != XFRM_MODE_TUNNEL) {
-               u32 padsize = ((blksize - 1) & 7) + 1;
-               mtu -= blksize - padsize;
-               mtu += min_t(u32, blksize - padsize, rem);
-       }
+       if (x->props.mode != XFRM_MODE_TUNNEL)
+               net_adj = sizeof(struct ipv6hdr);
+       else
+               net_adj = 0;
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index 0c220a416626af196f534ab062920c169f6dcd7a..60832766196054f1f03d2aa12558febf68e032d8 100644 (file)
@@ -1349,8 +1349,8 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                        if (w->leaf && fn->fn_flags & RTN_RTINFO) {
                                int err;
 
-                               if (w->count < w->skip) {
-                                       w->count++;
+                               if (w->skip) {
+                                       w->skip--;
                                        continue;
                                }
 
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                                neigh_flags = neigh->flags;
                                neigh_release(neigh);
                        }
-                       if (neigh_flags & NTF_ROUTER) {
+                       if (!(neigh_flags & NTF_ROUTER)) {
                                RT6_TRACE("purging route %p via non-router but gateway\n",
                                          rt);
                                return -1;
index d99fdc699625ca34252a33e84e21d8179e0fad7a..decc21d19c53e4b0c073b44e02deba46b6eac432 100644 (file)
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
        hdr->hop_limit--;
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
 
@@ -1187,6 +1188,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
 }
 
+static void ip6_append_data_mtu(int *mtu,
+                               int *maxfraglen,
+                               unsigned int fragheaderlen,
+                               struct sk_buff *skb,
+                               struct rt6_info *rt)
+{
+       if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+               if (skb == NULL) {
+                       /* first fragment, reserve header_len */
+                       *mtu = *mtu - rt->dst.header_len;
+
+               } else {
+                       /*
+                        * this fragment is not first, the headers
+                        * space is regarded as data space.
+                        */
+                       *mtu = dst_mtu(rt->dst.path);
+               }
+               *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+                             + fragheaderlen - sizeof(struct frag_hdr);
+       }
+}
+
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
@@ -1196,7 +1220,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_cork *cork;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *skb_prev = NULL;
        unsigned int maxfraglen, fragheaderlen;
        int exthdrlen;
        int dst_exthdrlen;
@@ -1253,8 +1277,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                inet->cork.fl.u.ip6 = *fl6;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
-               mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-                     rt->dst.dev->mtu : dst_mtu(&rt->dst);
+               if (rt->dst.flags & DST_XFRM_TUNNEL)
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(&rt->dst);
+               else
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(rt->dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
@@ -1350,25 +1378,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
-                       struct sk_buff *skb_prev;
 alloc_new_skb:
-                       skb_prev = skb;
-
                        /* There's no room in the current skb */
-                       if (skb_prev)
-                               fraggap = skb_prev->len - maxfraglen;
+                       if (skb)
+                               fraggap = skb->len - maxfraglen;
                        else
                                fraggap = 0;
+                       /* update mtu and maxfraglen if necessary */
+                       if (skb == NULL || skb_prev == NULL)
+                               ip6_append_data_mtu(&mtu, &maxfraglen,
+                                                   fragheaderlen, skb, rt);
+
+                       skb_prev = skb;
 
                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
-                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
-                               datalen = maxfraglen - fragheaderlen;
 
-                       fraglen = datalen + fragheaderlen;
+                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+                               datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
@@ -1377,13 +1407,16 @@ alloc_new_skb:
 
                        alloclen += dst_exthdrlen;
 
-                       /*
-                        * The last fragment gets additional space at tail.
-                        * Note: we overallocate on fragments with MSG_MODE
-                        * because we have no idea if we're the last one.
-                        */
-                       if (datalen == length + fraggap)
-                               alloclen += rt->dst.trailer_len;
+                       if (datalen != length + fraggap) {
+                               /*
+                                * this is not the last fragment, the trailer
+                                * space is regarded as data space.
+                                */
+                               datalen += rt->dst.trailer_len;
+                       }
+
+                       alloclen += rt->dst.trailer_len;
+                       fraglen = datalen + fragheaderlen;
 
                        /*
                         * We just reserve space for fragment header.
index b15dc08643a42f5a45ea9bb0bd2bd0ec1865a377..461e47c8e95620456e83710eaf99643c1382c8fc 100644 (file)
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
        IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+                        IPSTATS_MIB_OUTOCTETS, skb->len);
        return dst_output(skb);
 }
 
index 999a982ad3fd7d7abac40211b50320fc4c038109..becb048d18d402f0335bee4e9d0c198f94eba6c4 100644 (file)
@@ -2957,10 +2957,6 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
 
-#ifdef CONFIG_PROC_FS
-       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
        net->ipv6.ip6_rt_gc_expire = 30*HZ;
 
        ret = 0;
@@ -2981,10 +2977,6 @@ out_ip6_dst_ops:
 
 static void __net_exit ip6_route_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ipv6_route");
-       proc_net_remove(net, "rt6_stats");
-#endif
        kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        kfree(net->ipv6.ip6_prohibit_entry);
@@ -2993,11 +2985,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
        dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
+static int __net_init ip6_route_net_init_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+#endif
+       return 0;
+}
+
+static void __net_exit ip6_route_net_exit_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_remove(net, "ipv6_route");
+       proc_net_remove(net, "rt6_stats");
+#endif
+}
+
 static struct pernet_operations ip6_route_net_ops = {
        .init = ip6_route_net_init,
        .exit = ip6_route_net_exit,
 };
 
+static struct pernet_operations ip6_route_net_late_ops = {
+       .init = ip6_route_net_init_late,
+       .exit = ip6_route_net_exit_late,
+};
+
 static struct notifier_block ip6_route_dev_notifier = {
        .notifier_call = ip6_route_dev_notify,
        .priority = 0,
@@ -3047,19 +3061,25 @@ int __init ip6_route_init(void)
        if (ret)
                goto xfrm6_init;
 
+       ret = register_pernet_subsys(&ip6_route_net_late_ops);
+       if (ret)
+               goto fib6_rules_init;
+
        ret = -ENOBUFS;
        if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
        ret = register_netdevice_notifier(&ip6_route_dev_notifier);
        if (ret)
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
 out:
        return ret;
 
+out_register_late_subsys:
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
        fib6_rules_cleanup();
 xfrm6_init:
@@ -3078,6 +3098,7 @@ out_kmem_cache:
 void ip6_route_cleanup(void)
 {
        unregister_netdevice_notifier(&ip6_route_dev_notifier);
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
        fib6_rules_cleanup();
        xfrm6_fini();
        fib6_gc_cleanup();
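
The route.c hunks above split the /proc registration into a second, "late" pernet_operations set that is registered after the main one; on any later failure it must be unregistered first, which is why the error path gains the new out_register_late_subsys label just above fib6_rules_init. The general shape (register in order, tear down in reverse through cascading goto labels) in a standalone sketch with made-up step names:

#include <stdio.h>

static int reg(const char *name, int fail)
{
        if (fail) {
                printf("register %s: failed\n", name);
                return -1;
        }
        printf("register %s\n", name);
        return 0;
}

static void unreg(const char *name)
{
        printf("unregister %s\n", name);
}

int main(void)
{
        int ret;

        ret = reg("main pernet ops", 0);
        if (ret)
                goto out;
        ret = reg("late pernet ops", 0);
        if (ret)
                goto out_main;
        ret = reg("rtnl handlers", 1);          /* simulate the failure */
        if (ret)
                goto out_late;

        return 0;

out_late:
        unreg("late pernet ops");               /* newest registration first */
out_main:
        unreg("main pernet ops");
out:
        return 1;
}
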
index 554d5999abc40534e37c7817f13722b620b821e7..9df64a50b07569f3f9050a5a73a3e6c448c63502 100644 (file)
@@ -476,7 +476,8 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-                             struct request_values *rvp)
+                             struct request_values *rvp,
+                             u16 queue_mapping)
 {
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
                fl6.daddr = treq->rmt_addr;
+               skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                             struct request_values *rvp)
 {
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-       return tcp_v6_send_synack(sk, req, rvp);
+       return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1210,10 +1212,12 @@ have_isn:
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-       security_inet_conn_request(sk, skb, req);
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_release;
 
        if (tcp_v6_send_synack(sk, req,
-                              (struct request_values *)&tmp_ext) ||
+                              (struct request_values *)&tmp_ext,
+                              skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;
 
index 07d7d55a1b93af13013b23c361d8c2f7ff8382aa..cd6f7a991d8035bdfc6d883a8bcf21a433405030 100644 (file)
@@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
index 443591d629caadff0da53b2c861b8748e61edf7e..47b259fccd278dc168cb5109644d440530a5c5ed 100644 (file)
@@ -42,6 +42,11 @@ struct l2tp_eth {
        struct sock             *tunnel_sock;
        struct l2tp_session     *session;
        struct list_head        list;
+       atomic_long_t           tx_bytes;
+       atomic_long_t           tx_packets;
+       atomic_long_t           rx_bytes;
+       atomic_long_t           rx_packets;
+       atomic_long_t           rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -88,24 +93,40 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct l2tp_eth *priv = netdev_priv(dev);
        struct l2tp_session *session = priv->session;
 
+       atomic_long_add(skb->len, &priv->tx_bytes);
+       atomic_long_inc(&priv->tx_packets);
+
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
-       dev->stats.tx_bytes += skb->len;
-       dev->stats.tx_packets++;
+       return NETDEV_TX_OK;
+}
 
-       return 0;
+static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+                                                     struct rtnl_link_stats64 *stats)
+{
+       struct l2tp_eth *priv = netdev_priv(dev);
+
+       stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
+       stats->tx_packets = atomic_long_read(&priv->tx_packets);
+       stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
+       stats->rx_packets = atomic_long_read(&priv->rx_packets);
+       stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+       return stats;
 }
 
+
 static struct net_device_ops l2tp_eth_netdev_ops = {
        .ndo_init               = l2tp_eth_dev_init,
        .ndo_uninit             = l2tp_eth_dev_uninit,
        .ndo_start_xmit         = l2tp_eth_dev_xmit,
+       .ndo_get_stats64        = l2tp_eth_get_stats64,
 };
 
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
        ether_setup(dev);
-       dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
+       dev->features           |= NETIF_F_LLTX;
        dev->netdev_ops         = &l2tp_eth_netdev_ops;
        dev->destructor         = free_netdev;
 }
@@ -114,17 +135,17 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 {
        struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
        struct net_device *dev = spriv->dev;
+       struct l2tp_eth *priv = netdev_priv(dev);
 
        if (session->debug & L2TP_MSG_DATA) {
                unsigned int length;
-               u8 *ptr = skb->data;
 
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto error;
 
                pr_debug("%s: eth recv\n", session->name);
-               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
        }
 
        if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -139,15 +160,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        nf_reset(skb);
 
        if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += data_len;
-       } else
-               dev->stats.rx_errors++;
-
+               atomic_long_inc(&priv->rx_packets);
+               atomic_long_add(data_len, &priv->rx_bytes);
+       } else {
+               atomic_long_inc(&priv->rx_errors);
+       }
        return;
 
 error:
-       dev->stats.rx_errors++;
+       atomic_long_inc(&priv->rx_errors);
        kfree_skb(skb);
 }
 
@@ -162,6 +183,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
                if (dev) {
                        unregister_netdev(dev);
                        spriv->dev = NULL;
+                       module_put(THIS_MODULE);
                }
        }
 }
@@ -249,6 +271,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
        if (rc < 0)
                goto out_del_dev;
 
+       __module_get(THIS_MODULE);
        /* Must be done after register_netdev() */
        strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
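
Not from the patch above, only an illustration: the l2tp_eth hunks replace the dev->stats updates with per-device atomic_long_t counters that are bumped locklessly on the xmit/recv paths and summed in .ndo_get_stats64, which is what makes enabling NETIF_F_LLTX safe. Below is a user-space sketch of the same pattern using C11 atomics; the type and function names are invented for the example.

/* Lockless per-device counters aggregated by a reader, mirroring the
 * atomic_long_t + ndo_get_stats64 pattern above.  Toy names only. */
#include <stdatomic.h>
#include <stdio.h>

struct dev_priv {
	atomic_ulong tx_bytes;
	atomic_ulong tx_packets;
	atomic_ulong rx_bytes;
	atomic_ulong rx_packets;
};

struct dev_stats {
	unsigned long tx_bytes, tx_packets, rx_bytes, rx_packets;
};

/* Hot path: no lock, just relaxed atomic increments. */
static void xmit_one(struct dev_priv *p, unsigned long len)
{
	atomic_fetch_add_explicit(&p->tx_bytes, len, memory_order_relaxed);
	atomic_fetch_add_explicit(&p->tx_packets, 1, memory_order_relaxed);
}

/* Reader: snapshot each counter individually; totals may be slightly
 * skewed against each other, which is acceptable for interface stats. */
static void get_stats(struct dev_priv *p, struct dev_stats *s)
{
	s->tx_bytes   = atomic_load_explicit(&p->tx_bytes, memory_order_relaxed);
	s->tx_packets = atomic_load_explicit(&p->tx_packets, memory_order_relaxed);
	s->rx_bytes   = atomic_load_explicit(&p->rx_bytes, memory_order_relaxed);
	s->rx_packets = atomic_load_explicit(&p->rx_packets, memory_order_relaxed);
}

int main(void)
{
	static struct dev_priv priv;	/* zero-initialized counters */
	struct dev_stats st;

	xmit_one(&priv, 1500);
	xmit_one(&priv, 60);
	get_stats(&priv, &st);
	printf("tx: %lu packets, %lu bytes\n", st.tx_packets, st.tx_bytes);
	return 0;
}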
index 889f5d13d7ba342b5ea2a2c447b1c6858b553de2..61d8b75d2686c0272a1618887c08c34de2b4abe5 100644 (file)
@@ -239,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
-       int ret = -EINVAL;
+       int ret;
        int chk_addr_ret;
 
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_l2tpip))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET)
+               return -EINVAL;
+
        ret = -EADDRINUSE;
        read_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        ret = 0;
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
 out:
        release_sock(sk);
 
@@ -288,6 +297,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
        int rc;
 
+       if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+               return -EINVAL;
+
        if (addr_len < sizeof(*lsa))
                return -EINVAL;
 
@@ -311,6 +323,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        return rc;
 }
 
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+       if (sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
 {
@@ -444,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
-               if (connected)
+               if (connected) {
                        sk_setup_caps(sk, &rt->dst);
-               else
-                       dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+               } else {
+                       skb_dst_set(skb, &rt->dst);
+                       goto xmit;
+               }
        }
 
        /* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -455,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
         */
        skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(skb, &inet->cork.fl);
        rcu_read_unlock();
@@ -530,7 +553,7 @@ static struct proto l2tp_ip_prot = {
        .close             = l2tp_ip_close,
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
-       .disconnect        = udp_disconnect,
+       .disconnect        = l2tp_ip_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
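
For illustration only (not part of the diff): the l2tp_ip hunks above use SOCK_ZAPPED as a bind-state flag. bind() is refused once the flag has been cleared, connect() and the new l2tp_ip_disconnect() refuse or short-circuit while it is still set, and the cheap family/length checks run before any locks are taken. A condensed user-space sketch of that state machine, with made-up names and a plain bool for the flag, is below; the IPv6 hunks that follow apply the same pattern.

/* Sketch of the "must bind before connect" checks enforced above. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
	bool zapped;			/* true until a successful bind, like SOCK_ZAPPED */
	unsigned short family;
};

static int toy_bind(struct toy_sock *sk, unsigned short family)
{
	if (!sk->zapped)		/* already bound: refuse a second bind */
		return -EINVAL;
	if (family != sk->family)	/* validate invariants before any locking */
		return -EINVAL;

	/* ... address checks and hash-table insertion would go here ... */
	sk->zapped = false;		/* only now does the socket become usable */
	return 0;
}

static int toy_connect(struct toy_sock *sk)
{
	if (sk->zapped)			/* must bind first, autobinding does not work */
		return -EINVAL;
	return 0;
}

static int toy_disconnect(struct toy_sock *sk)
{
	if (sk->zapped)			/* nothing to tear down on an unbound socket */
		return 0;
	/* ... real teardown (udp_disconnect() in the patch) ... */
	return 0;
}

int main(void)
{
	struct toy_sock sk = { .zapped = true, .family = 2 /* AF_INET */ };

	printf("connect before bind: %d\n", toy_connect(&sk));	/* -EINVAL */
	printf("bind:                %d\n", toy_bind(&sk, 2));	/* 0 */
	printf("connect after bind:  %d\n", toy_connect(&sk));	/* 0 */
	printf("disconnect:          %d\n", toy_disconnect(&sk));/* 0 */
	return 0;
}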
index 0291d8d85f302f3a244a13c0d6a6a6465137882e..35e1e4bde58730d8395e2870d552230bca3a9c3d 100644 (file)
@@ -258,6 +258,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        int addr_type;
        int err;
 
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET6)
+               return -EINVAL;
        if (addr_len < sizeof(*addr))
                return -EINVAL;
 
@@ -331,6 +335,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);
 
+       sock_reset_flag(sk, SOCK_ZAPPED);
        release_sock(sk);
        return 0;
 
@@ -354,6 +359,9 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
        int     addr_type;
        int rc;
 
+       if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+               return -EINVAL;
+
        if (addr_len < sizeof(*lsa))
                return -EINVAL;
 
@@ -383,6 +391,14 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
        return rc;
 }
 
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+       if (sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                            int *uaddr_len, int peer)
 {
@@ -689,7 +705,7 @@ static struct proto l2tp_ip6_prot = {
        .close             = l2tp_ip6_close,
        .bind              = l2tp_ip6_bind,
        .connect           = l2tp_ip6_connect,
-       .disconnect        = udp_disconnect,
+       .disconnect        = l2tp_ip6_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip6_destroy_sock,
        .setsockopt        = ipv6_setsockopt,
index 8577264378fe0a88e6b3aadcdc6d96333782aa95..ddc553e76671bae0eac8dec5ace2e63035c45869 100644 (file)
@@ -923,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP netlink");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
-            __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
index 26ddb699d693dcbc2f2610fdd4a35b1784b04b8c..c649188314cce99c17fca198e75d597c24701197 100644 (file)
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
        struct tid_ampdu_rx *tid_rx;
        unsigned long timeout;
 
+       rcu_read_lock();
        tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
-       if (!tid_rx)
+       if (!tid_rx) {
+               rcu_read_unlock();
                return;
+       }
 
        timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&tid_rx->session_timer, timeout);
+               rcu_read_unlock();
                return;
        }
+       rcu_read_unlock();
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
index 495831ee48f1b007abe235852968e9fd3bd188b5..7d5108a867ad9ec5341fc5122a2b65cd792846c3 100644 (file)
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
                sinfo.filled = 0;
                sta_set_sinfo(sta, &sinfo);
 
-               if (sinfo.filled | STATION_INFO_TX_BITRATE)
+               if (sinfo.filled & STATION_INFO_TX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.txrate);
                i++;
-               if (sinfo.filled | STATION_INFO_RX_BITRATE)
+               if (sinfo.filled & STATION_INFO_RX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.rxrate);
                i++;
 
-               if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
+               if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
                        data[i] = (u8)sinfo.signal_avg;
                i++;
        } else {
@@ -2093,6 +2093,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        int i, ret;
 
+       if (!ieee80211_sdata_running(sdata))
+               return -ENETDOWN;
+
        if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
                ret = drv_set_bitrate_mask(local, sdata, mask);
                if (ret)
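
A short demonstration, separate from the patch, of why the ethtool-stats hunk above swaps '|' for '&': OR-ing a flag into sinfo.filled always yields a non-zero value, so the "is this field filled?" branch was taken unconditionally, whereas '&' performs the intended membership test.

/* Why 'filled | FLAG' is a bug and 'filled & FLAG' is the correct test. */
#include <stdio.h>

#define FLAG_TX_BITRATE 0x4u

int main(void)
{
	unsigned int filled = 0;	/* flag not set */

	printf("filled | FLAG = %u (always truthy, the bug)\n", filled | FLAG_TX_BITRATE);
	printf("filled & FLAG = %u (correct test)\n",           filled & FLAG_TX_BITRATE);
	return 0;
}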
index d4c19a7773db24b12bacf0407330ebccd75ba772..8664111d05663d47678f2088f4169a6ab80fdb96 100644 (file)
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_configure_filter(local);
                break;
        default:
+               mutex_lock(&local->mtx);
+               if (local->hw_roc_dev == sdata->dev &&
+                   local->hw_roc_channel) {
+                       /* ignore return value since this is racy */
+                       drv_cancel_remain_on_channel(local);
+                       ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+               }
+               mutex_unlock(&local->mtx);
+
+               flush_work(&local->hw_roc_start);
+               flush_work(&local->hw_roc_done);
+
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
index b3b3c264ff66b970beac9b5090cd086610266419..0db5d34a06b69c8c72798a8a7d4e6c4f642f7560 100644 (file)
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
        sdata->vif.bss_conf.qos = true;
 }
 
+static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       lockdep_assert_held(&sdata->local->mtx);
+
+       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_run_deferred_scan(sdata->local);
+}
+
+static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       mutex_lock(&sdata->local->mtx);
+       __ieee80211_stop_poll(sdata);
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
                                           u16 capab, bool erp_valid, u8 erp)
 {
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
        sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
        /* just to be sure */
-       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_stop_poll(sdata);
 
        ieee80211_led_assoc(local, 1);
 
@@ -1327,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1337,10 +1351,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!ifmgd->associated))
                return;
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+       ieee80211_stop_poll(sdata);
 
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1360,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1369,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
@@ -1456,8 +1472,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
+       __ieee80211_stop_poll(sdata);
 
        mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1492,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                  round_jiffies_up(jiffies +
                                   IEEE80211_CONNECTION_IDLE_TIME));
 out:
-       ieee80211_run_deferred_scan(local);
        mutex_unlock(&local->mtx);
 }
 
@@ -1522,6 +1536,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
         * anymore. The timeout will be reset if the frame is ACKed by
         * the AP.
         */
+       ifmgd->probe_send_count++;
+
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
                ifmgd->nullfunc_failed = false;
                ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1554,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                                         0, (u32) -1, true, false);
        }
 
-       ifmgd->probe_send_count++;
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
        run_again(ifmgd, ifmgd->probe_timeout);
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
@@ -2159,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                       sdata->name, mgmt->sa, status_code);
                ieee80211_destroy_assoc_data(sdata, false);
        } else {
-               printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
                if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
-                       ieee80211_destroy_assoc_data(sdata, true);
-                       sta_info_destroy_addr(sdata, mgmt->bssid);
+                       ieee80211_destroy_assoc_data(sdata, false);
                        cfg80211_put_bss(*bss);
                        return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
                }
+               printk(KERN_DEBUG "%s: associated\n", sdata->name);
 
                /*
                 * destroy assoc_data afterwards, as otherwise an idle
@@ -2407,7 +2420,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
                                    sdata->name);
 #endif
+               mutex_lock(&local->mtx);
                ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
+               ieee80211_run_deferred_scan(local);
+               mutex_unlock(&local->mtx);
+
                mutex_lock(&local->iflist_mtx);
                ieee80211_recalc_ps(local, -1);
                mutex_unlock(&local->iflist_mtx);
@@ -2594,9 +2611,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
-
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
        mutex_unlock(&ifmgd->mtx);
@@ -2873,8 +2887,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
        u32 flags;
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-               sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
-                                       IEEE80211_STA_CONNECTION_POLL);
+               __ieee80211_stop_poll(sdata);
 
                /* let's probe the connection once */
                flags = sdata->local->hw.flags;
@@ -2943,7 +2956,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
        if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
                add_timer(&ifmgd->chswitch_timer);
        ieee80211_sta_reset_beacon_monitor(sdata);
+
+       mutex_lock(&sdata->local->mtx);
        ieee80211_restart_sta_timer(sdata);
+       mutex_unlock(&sdata->local->mtx);
 }
 #endif
 
@@ -3105,7 +3121,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        }
 
        local->oper_channel = cbss->channel;
-       ieee80211_hw_config(local, 0);
+       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        if (!have_sta) {
                u32 rates = 0, basic_rates = 0;
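
Side note, not taken from the patch: the mlme.c hunks above adopt the usual locked/__unlocked helper pairing, where __ieee80211_stop_poll() asserts via lockdep_assert_held() that local->mtx is already held and ieee80211_stop_poll() is the locking wrapper for every other caller. A pthread-based sketch of that convention follows, with assert() standing in for lockdep and all names invented; build it with the platform's pthread flag (e.g. cc -pthread).

/* foo()/__foo() locking convention: __stop_poll() assumes the mutex is
 * held, stop_poll() takes it.  assert() imitates lockdep_assert_held(). */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static bool mtx_held;			/* poor man's lockdep state for the sketch */
static unsigned int poll_flags = 0x3;

static void __stop_poll(void)
{
	assert(mtx_held);		/* caller must already hold mtx */
	poll_flags = 0;
	/* ... kick deferred work that also requires mtx ... */
}

static void stop_poll(void)
{
	pthread_mutex_lock(&mtx);
	mtx_held = true;
	__stop_poll();
	mtx_held = false;
	pthread_mutex_unlock(&mtx);
}

int main(void)
{
	stop_poll();			/* unlocked caller uses the wrapper */

	pthread_mutex_lock(&mtx);	/* a caller that already holds mtx ... */
	mtx_held = true;
	__stop_poll();			/* ... calls the __ variant directly */
	mtx_held = false;
	pthread_mutex_unlock(&mtx);

	printf("poll_flags = %u\n", poll_flags);
	return 0;
}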
index f054e94901a295443abcae7c3cfddda963f8d7c4..935aa4b6deee0220737ee4c1cebad69472dea343 100644 (file)
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
                return;
        }
 
+       /* was never transmitted */
+       if (local->hw_roc_skb) {
+               u64 cookie;
+
+               cookie = local->hw_roc_cookie ^ 2;
+
+               cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+                                       local->hw_roc_skb->data,
+                                       local->hw_roc_skb->len, false,
+                                       GFP_KERNEL);
+
+               kfree_skb(local->hw_roc_skb);
+               local->hw_roc_skb = NULL;
+               local->hw_roc_skb_for_status = NULL;
+       }
+
        if (!local->hw_roc_for_tx)
                cfg80211_remain_on_channel_expired(local->hw_roc_dev,
                                                   local->hw_roc_cookie,
index 2d1acc6c54455de4d397dc64cc471d8890e95bc7..f9e51ef8dfa2432ec44168feebc82579f3ac0be6 100644 (file)
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                        max_rates = sband->n_bitrates;
        }
 
-       msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+       msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;
 
index 7bcecf73aafbf9dadb87a2c8a114e3eb92490131..965e6ec0adb6c12393ced183a44463a63e09a9ff 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index f5b1638fbf8092a5ac30eebaea5b46b0b840867a..de455f8bbb91c0ffbedd2c911623380946d07d0b 100644 (file)
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* make the station visible */
        sta_info_hash_add(local, sta);
 
-       list_add(&sta->list, &local->sta_list);
+       list_add_rcu(&sta->list, &local->sta_list);
 
        set_sta_flag(sta, WLAN_STA_INSERTED);
 
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        if (ret)
                return ret;
 
-       list_del(&sta->list);
+       list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
        for (i = 0; i < NUM_DEFAULT_KEYS; i++)
index 3bb24a121c95f7e82e35719a6aebf582038c449c..a470e1123a5576ed5e14b779ed4a9213cda407b7 100644 (file)
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *     calculating clockdrift
+ * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
+ * @supports_40mhz: tracks whether the station advertised 40 MHz support
+ *     as we overwrite its HT parameters with the currently used value
  */
 struct sta_info {
        /* General information, mostly static */
index 5f827a6b0d8d1f7c494424be701a95ff1b6bf701..e453212fa17f741bc380b2cbdabe59ee4f6df5d7 100644 (file)
@@ -153,7 +153,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
 
        /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
        if (ieee80211_is_data_qos(hdr->frame_control) &&
-           *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+           *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
                dur = 0;
        else
                /* Time needed to transmit ACK
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
-       struct mesh_path __maybe_unused *mppath = NULL;
+       struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        goto fail;
                }
                rcu_read_lock();
-               if (!is_multicast_ether_addr(skb->data))
-                       mppath = mpp_path_lookup(skb->data, sdata);
+               if (!is_multicast_ether_addr(skb->data)) {
+                       mpath = mesh_path_lookup(skb->data, sdata);
+                       if (!mpath)
+                               mppath = mpp_path_lookup(skb->data, sdata);
+               }
 
                /*
                 * Use address extension if it is a packet from
index 22f2216b397ea37b5845efc359c7d0e519261aec..8dd4712620ff53832a212a3d69791e13cd59905f 100644 (file)
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        enum ieee80211_sta_state state;
 
                        for (state = IEEE80211_STA_NOTEXIST;
-                            state < sta->sta_state - 1; state++)
+                            state < sta->sta_state; state++)
                                WARN_ON(drv_sta_state(local, sta->sdata, sta,
                                                      state, state + 1));
                }
@@ -1371,6 +1371,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                }
        }
 
+       /* add back keys */
+       list_for_each_entry(sdata, &local->interfaces, list)
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_enable_keys(sdata);
+
+ wake_up:
        /*
         * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
         * sessions can be established after a resume.
@@ -1392,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
-       /* add back keys */
-       list_for_each_entry(sdata, &local->interfaces, list)
-               if (ieee80211_sdata_running(sdata))
-                       ieee80211_enable_keys(sdata);
-
- wake_up:
        ieee80211_wake_queues_by_reason(hw,
                        IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
index 8781d8f904d94940880ba90e1fd247c33b4b1aa2..434b6873b352a9e384fd02b2e607bc1fab17895e 100644 (file)
@@ -83,9 +83,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
 {
        struct xmit_work *work;
 
-       if (!(priv->phy->channels_supported[page] & (1 << chan)))
+       if (!(priv->phy->channels_supported[page] & (1 << chan))) {
                WARN_ON(1);
                return NETDEV_TX_OK;
+       }
 
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
index 819c342f5b3012b6a4e37f60414fb835fc0041d8..9730882697aaedbab0beee66f0f12a654b97b63a 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
index ee863943c8267286e4b16e52400dd40bf51b4f26..d5d3607ae7bcf5e9704bd189217115cf048aee4b 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
index dd811b8dd97c66a9da387a4e37073c47e8dc4407..84444dda194b61806efaeadde4c2458f502d8283 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
 
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        unsigned int idx;
 
-       if (event != NETDEV_UNREGISTER)
+       if (event != NETDEV_UNREGISTER || !ipvs)
                return NOTIFY_DONE;
        IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
        EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
                }
        }
 
-       list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+       list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
                __ip_vs_dev_reset(dest, dev);
        }
        mutex_unlock(&__ip_vs_mutex);
index 46d69d7f1bb4e15a3116c389131e23fd9a8d28d6..31f50bc3a3124a111ce0c2f75f4f9e22f095243f 100644 (file)
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                return 0;
 
        /* RTP port is even */
-       port &= htons(~1);
-       rtp_port = port;
-       rtcp_port = htons(ntohs(port) + 1);
+       rtp_port = port & ~htons(1);
+       rtcp_port = port | htons(1);
 
        /* Create expect for RTP */
        if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
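
The SIP conntrack hunk above derives the RTP/RTCP pair directly in network byte order: clearing the low bit (port & ~htons(1)) gives the even RTP port and setting it (port | htons(1)) gives the odd RTCP port, avoiding the earlier swap-mask-swap round trip. The small check below (not part of the patch) confirms that the bitwise form matches the arithmetic form, since byte swapping only permutes bytes and therefore commutes with masking or setting the low bit.

/* Verifies the identity relied on above. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t port = htons(5007);			/* odd port, network order */

	uint16_t rtp_bitwise  = port & ~htons(1);	/* even member of the pair */
	uint16_t rtcp_bitwise = port |  htons(1);	/* odd member of the pair  */

	uint16_t rtp_swapped  = htons(ntohs(port) & ~1);
	uint16_t rtcp_swapped = htons(ntohs(port) | 1);

	printf("RTP : %u == %u\n", ntohs(rtp_bitwise),  ntohs(rtp_swapped));
	printf("RTCP: %u == %u\n", ntohs(rtcp_bitwise), ntohs(rtcp_swapped));
	return 0;
}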
index 3e797d1fcb94272ad96b9798e91a6e6467031379..791d56bbd74a7613e4b1d87ba74bcf2a8a135e46 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
index 0a96a43108edde1dcdc251a168f0547c9f277378..1686ca1b53a157d8568ae8ac104aeb0f9415d696 100644 (file)
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-       u32                     src;
-       u32                     dst;
+       __be32                  src;
+       __be32                  dst;
        union hmark_ports       uports;
-       uint8_t                 proto;
+       u8                      proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
        return (addr32[0] & mask[0]) ^
               (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
               (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
        switch (l3num) {
        case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
        return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+                                   const struct xt_hmark_info *info)
+{
+       union hmark_ports hp;
+       u16 src, dst;
+
+       hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+       src = ntohs(hp.b16.src);
+       dst = ntohs(hp.b16.dst);
+
+       if (dst > src)
+               uports->v32 = (dst << 16) | src;
+       else
+               uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
                    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
        otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-                                info->src_mask.all);
-       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-                                info->dst_mask.all);
+       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+                                info->src_mask.ip6);
+       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+                                info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
 
        t->proto = nf_ct_protonum(ct);
        if (t->proto != IPPROTO_ICMP) {
-               t->uports.p16.src = otuple->src.u.all;
-               t->uports.p16.dst = rtuple->src.u.all;
-               t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                               info->port_set.v32;
-               if (t->uports.p16.dst < t->uports.p16.src)
-                       swap(t->uports.p16.dst, t->uports.p16.src);
+               t->uports.b16.src = otuple->src.u.all;
+               t->uports.b16.dst = rtuple->src.u.all;
+               hmark_swap_ports(&t->uports, info);
        }
 
        return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
        u32 hash;
+       u32 src = ntohl(t->src);
+       u32 dst = ntohl(t->dst);
 
-       if (t->dst < t->src)
-               swap(t->src, t->dst);
+       if (dst < src)
+               swap(src, dst);
 
-       hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+       hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
        hash = hash ^ (t->proto & info->proto_mask);
 
        return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
        if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
                return;
 
-       t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                       info->port_set.v32;
-
-       if (t->uports.p16.dst < t->uports.p16.src)
-               swap(t->uports.p16.dst, t->uports.p16.src);
+       hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
                        return -1;
        }
 noicmp:
-       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
                }
        }
 
-       t->src = (__force u32) ip->saddr;
-       t->dst = (__force u32) ip->daddr;
-
-       t->src &= info->src_mask.ip;
-       t->dst &= info->dst_mask.ip;
+       t->src = ip->saddr & info->src_mask.ip;
+       t->dst = ip->daddr & info->dst_mask.ip;
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
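
To illustrate the xt_HMARK changes above (this is not from the patch): the hash becomes endian-independent by converting the stored __be32 addresses with ntohl(), order-normalizing the (src, dst) pair with swap(), and normalizing the port pair in hmark_swap_ports(), so big- and little-endian cluster members compute the same mark for both directions of a flow. The sketch below shows only that normalization step; mix3() is a toy stand-in for jhash_3words(), not the kernel hash.

/* Direction- and endian-independent tuple normalization, as above. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	/* Any deterministic mix works for the demonstration. */
	return (a * 2654435761u) ^ (b * 40503u) ^ (c * 2246822519u);
}

static uint32_t flow_mark(uint32_t saddr_be, uint32_t daddr_be,
			  uint16_t sport_be, uint16_t dport_be)
{
	uint32_t src = ntohl(saddr_be);		/* hash host-order values ... */
	uint32_t dst = ntohl(daddr_be);
	uint16_t sp = ntohs(sport_be), dp = ntohs(dport_be);
	uint32_t tmp, ports;

	if (dst < src) {			/* ... and order-normalize them */
		tmp = src; src = dst; dst = tmp;
	}
	ports = dp > sp ? ((uint32_t)dp << 16) | sp
			: ((uint32_t)sp << 16) | dp;

	return mix3(src, dst, ports);
}

int main(void)
{
	uint32_t a = inet_addr("192.0.2.1"), b = inet_addr("198.51.100.7");
	uint16_t p1 = htons(443), p2 = htons(51000);

	/* Both directions of the flow map to the same mark. */
	printf("%08x\n", (unsigned)flow_mark(a, b, p1, p2));
	printf("%08x\n", (unsigned)flow_mark(b, a, p2, p1));
	return 0;
}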
index 035960ec5cb9c8f51b51e48d6909ebb112047f93..c6f7db720d84f4650e975a952054c04d15868fc3 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
                info->del_set.flags, 0, UINT_MAX);
 
        /* Normalize to fit into jiffies */
-       if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+       if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+           add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
                add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
        if (info->add_set.index != IPSET_INVALID_ID)
                ip_set_add(info->add_set.index, skb, par, &add_opt);
index 8340ace837f2eb309a707d84a8d71da0fa282bbd..2cc7c1ee769046c1b45ce677caa6f8dac3e4a1ff 100644 (file)
@@ -836,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 #ifdef CONFIG_MODULES
                if (res == NULL) {
                        genl_unlock();
-                       request_module("net-pf-%d-proto-%d-type-%s",
+                       request_module("net-pf-%d-proto-%d-family-%s",
                                       PF_NETLINK, NETLINK_GENERIC, name);
                        genl_lock();
                        res = genl_family_find_byname(name);
index 3f339b19d140d666328b5dfd462f5d27bafb94d5..e06d458fc7197ff73e1f1f6f155c154c330cfa67 100644 (file)
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        pr_debug("%p\n", sk);
 
+       if (llcp_sock == NULL || llcp_sock->dev == NULL)
+               return -EBADFD;
+
        addr->sa_family = AF_NFC;
        *len = sizeof(struct sockaddr_nfc_llcp);
 
index cb2646179e5f8b8d1836499579d1ccfd109fad60..2ab196a9f228ac873238c2a060685bacea62b87a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
index ec1134c9e07fcd34fc5d6116e1a4aef9dedf6f23..8b8a6a2b2badaf61e9c71a174809ca989438668f 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 779ce4ff92ec9e5a7de8ce5123b6c6dbc7d798e3..5a940dbd74a3bbf6ec3cadb82cbe47a4dbee8f15 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index bf35b4e1a14c02dfe8f3c8f03dbcb19084bcaaf7..12c30f3e643e00e3fa495cfc7d471e0078b00114 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index d01208968c830c5fa7a7484ea15ae059fc7015d6..a2fba7edfd1f433546a65b5ae386d97b80e16caa 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 9dd4f926f7d15ec4f03786d2ebcaa8070d9954b3..576f22c9c76e82630ba7e4495df1db583cf63b01 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 36f75a9e2c3d1fd248d1102a268351aa05a2c19b..5bf6341e2dd47b1f1a0f6dd3a7a3f9c9c28562c3 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index cfdf135fcd69332e9cb497315f9b6beb39097126..7dd762a464e55f9ef7f41c83b433aa81b6f4d7cf 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Remi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 89cfa9ce49395cfe2e64c738e249ba7891453eaa..0acc943f713a94c5d695cd150fa445e906eba751 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 696348fd31a11300f0346007b124cb55d8f10481..d6bbbbd0af182352b41111ce9a2eefb22ef9be85 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index edfaaaf164ebfab0eebda8b509d91fed1735daee..8d2b3d5a7c21e5ffb2063621a569e1f6280f52ad 100644 (file)
@@ -186,8 +186,7 @@ struct rds_ib_device {
        struct work_struct      free_work;
 };
 
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
 
 /* bits for i_ack_flags */
index 2754f098d43633f80c0a39b7c34a35296d5523ea..bebaa43484bcdbf72bbaaa764559051aeb8efb99 100644 (file)
@@ -229,7 +229,7 @@ found_UDP_peer:
        return peer;
 
 new_UDP_peer:
-       _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+       _net("Rx UDP DGRAM from NEW peer");
        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = -EBUSY [new]");
        return ERR_PTR(-EBUSY);
index 8522a4793374136fa4ab66aa9b325b48019801ac..ca8e0a57d945dabeb51147f338cef363672040d5 100644 (file)
@@ -16,8 +16,6 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
-
 /*
  * The ATM queuing discipline provides a framework for invoking classifiers
  * (aka "filters"), which in turn select classes of this queuing discipline.
index a2a95aabf9c22bd3b0b0bbbe558feb1c1f65430a..c412ad0d0308ed8aedee1c5ce69bce323a32b823 100644 (file)
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct sk_buff_head *list = &sch->q;
        psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-       struct sk_buff *skb;
-
-       if (likely(skb_queue_len(list) < sch->limit)) {
-               skb = skb_peek_tail(list);
-               /* Optimize for add at tail */
-               if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-                       return qdisc_enqueue_tail(nskb, sch);
+       struct sk_buff *skb = skb_peek_tail(list);
 
-               skb_queue_reverse_walk(list, skb) {
-                       if (tnext >= netem_skb_cb(skb)->time_to_send)
-                               break;
-               }
+       /* Optimize for add at tail */
+       if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+               return __skb_queue_tail(list, nskb);
 
-               __skb_queue_after(list, skb, nskb);
-               sch->qstats.backlog += qdisc_pkt_len(nskb);
-               return NET_XMIT_SUCCESS;
+       skb_queue_reverse_walk(list, skb) {
+               if (tnext >= netem_skb_cb(skb)->time_to_send)
+                       break;
        }
 
-       return qdisc_reshape_fail(nskb, sch);
+       __skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
-       int ret;
        int count = 1;
 
        /* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }
 
+       if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+               return qdisc_reshape_fail(skb, sch);
+
+       sch->qstats.backlog += qdisc_pkt_len(skb);
+
        cb = netem_skb_cb(skb);
        if (q->gap == 0 ||              /* not doing reordering */
            q->counter < q->gap - 1 ||  /* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
                cb->time_to_send = now + delay;
                ++q->counter;
-               ret = tfifo_enqueue(skb, sch);
+               tfifo_enqueue(skb, sch);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                q->counter = 0;
 
                __skb_queue_head(&sch->q, skb);
-               sch->qstats.backlog += qdisc_pkt_len(skb);
                sch->qstats.requeues++;
-               ret = NET_XMIT_SUCCESS;
-       }
-
-       if (ret != NET_XMIT_SUCCESS) {
-               if (net_xmit_drop_count(ret)) {
-                       sch->qstats.drops++;
-                       return ret;
-               }
        }
 
        return NET_XMIT_SUCCESS;
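
As an aside to the sch_netem hunks above (not part of the diff): the limit check and backlog accounting move into netem_enqueue(), and tfifo_enqueue() becomes a pure time-ordered insert that appends at the tail in the common case and otherwise walks backwards to find the insertion point. A toy doubly linked list version of that insert follows; the list types are invented, not the kernel sk_buff_head API.

/* Tail-biased sorted insert, as tfifo_enqueue() above. */
#include <stdio.h>
#include <stdlib.h>

struct node { long time_to_send; struct node *prev, *next; };
struct list { struct node head; };	/* circular list with a sentinel head */

static void list_init(struct list *l)
{
	l->head.prev = l->head.next = &l->head;
}

static void insert_after(struct node *pos, struct node *n)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

static void tfifo_enqueue(struct list *l, struct node *n)
{
	struct node *p = l->head.prev;	/* current tail */

	/* Optimize for add at tail: packets usually arrive in send order. */
	if (p == &l->head || n->time_to_send >= p->time_to_send) {
		insert_after(p, n);
		return;
	}

	/* Otherwise walk backwards to the last entry not later than us. */
	for (p = p->prev; p != &l->head; p = p->prev)
		if (n->time_to_send >= p->time_to_send)
			break;
	insert_after(p, n);
}

int main(void)
{
	struct list q;
	long times[] = { 10, 30, 20, 40 };
	struct node *n;

	list_init(&q);
	for (unsigned i = 0; i < sizeof(times) / sizeof(times[0]); i++) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->time_to_send = times[i];
		tfifo_enqueue(&q, n);
	}
	for (n = q.head.next; n != &q.head; n = n->next)
		printf("%ld ", n->time_to_send);	/* prints: 10 20 30 40 */
	printf("\n");
	return 0;
}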
index 74305c883bd3ee842c535bbc27f003d389207346..30ea4674cabd2a735c47af22239ab006b1c2e826 100644 (file)
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
        if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);
index 5bc9ab161b373eadf51843ebaa77c527716a2122..b16517ee1aaf7cfad94978ed1181df6432da6f07 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index 80564fe03024634dc479280a8fe8d60ea643e1cc..8b9b6790a3dfc60b562a3a6ea30dccf248612cc4 100644 (file)
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
        epb = &ep->base;
 
-       if (hlist_unhashed(&epb->node))
-               return;
-
        epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
        head = &sctp_assoc_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
index f1b7d4bb591e9b648865c4e096ef838e77678442..6ae47acaaec65ceb898e10e462454e667a8e739e 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index 5942d27b1444c71dc4c7af0c591ec3b1b181a92d..9c90811d11345b6fb4a55b5878a9ec168bd3f17c 100644 (file)
@@ -673,7 +673,9 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
                        sctp_bh_unlock_sock(sk);
                }
+#if IS_ENABLED(CONFIG_IPV6)
 free_next:
+#endif
                list_del(&addrw->list);
                kfree(addrw);
        }
index a85eeeb55dd0022e009895c53a6d8593929b7691..b6de71efb140c538a66e0a7901df2b4402a85bf4 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once we have generated a SACK, check our SACK generation; if it
+        * has wrapped around to 0, reset every transport's generation to 0
+        * and reset the association generation to 1.
+        *
+        * The idea is that zero is never used as a valid generation for the
+        * association, so no transport will match after a wrap event like
+        * this until the next SACK.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
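
The generation counters above keep a pending SACK from being bundled on a transport that has not caught up: each transport stores the association generation it last saw, and the wrap handling parks every transport at 0 while the association restarts at 1, so nothing matches by accident until the next SACK. A self-contained userspace sketch of the same wrap rule (names and layout are illustrative, not the kernel structures):

#include <stdint.h>
#include <stdio.h>

#define NTRANS 3

struct assoc {
	uint32_t sack_generation;           /* association-wide counter */
	uint32_t trans_generation[NTRANS];  /* per-transport copies */
};

/* Bump the association generation after building a SACK; on wrap,
 * park every transport at 0 and restart the association at 1, so no
 * transport can match again until it actually sees the next SACK. */
static void bump_sack_generation(struct assoc *a)
{
	if (++a->sack_generation == 0) {
		for (int i = 0; i < NTRANS; i++)
			a->trans_generation[i] = 0;
		a->sack_generation = 1;
	}
}

/* A transport may bundle the pending SACK only if its generation
 * matches the association's current one. */
static int may_bundle_sack(const struct assoc *a, int t)
{
	return a->trans_generation[t] == a->sack_generation;
}

int main(void)
{
	struct assoc a = { .sack_generation = UINT32_MAX };

	bump_sack_generation(&a);   /* wraps: association -> 1, transports -> 0 */
	printf("assoc=%u trans0=%u bundle=%d\n",
	       (unsigned)a.sack_generation,
	       (unsigned)a.trans_generation[0],
	       may_bundle_sack(&a, 0));
	return 0;
}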
index c96d1a81cf4209f6d3ca9b6762fefa47e75867c3..8716da1a859221dc96d206ed517190e9e9cfa2ca 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b3b8a8d813eb663f18a6fd488d3f0472f2fe1263..31c7bfcd9b5872aa136e2b513123bb6d582bdff6 100644 (file)
@@ -1231,8 +1231,14 @@ out_free:
        SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
                          " kaddrs: %p err: %d\n",
                          asoc, kaddrs, err);
-       if (asoc)
+       if (asoc) {
+               /* sctp_primitive_ASSOCIATE may have added this association
+                * to the hash table.  Try to unhash it just in case; it's a
+                * no-op if it wasn't hashed, so we're safe.
+                */
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
        return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        goto out_unlock;
 
 out_free:
-       if (new_asoc)
+       if (new_asoc) {
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
 out_unlock:
        sctp_release_sock(sk);
 
index b026ba0c69922e09eb5c150298f650ea7bb3d1c4..1dcceb6e0ce6c2b9e5f6c0c3abf8075c3e75224c 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40cebc981ae7962bb9cd20ae84823b70d43cf..b5fb7c409023ff8adea81d41e68ace60781febae 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017834c211a840e83c1e39bebdbb001ec5c4..33d894776192205cd4b4a9573ccf70664c723b2d 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7f2ffbd5a219759f55bdc546f2edbf19b0..f5a6a4f4faf721af4874538093cb003f4efc202c 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 38f388c39dce89a5e6456514771f70ef975af1c0..107c4528654fd5867b8363ccdf66c648e9202a34 100644 (file)
@@ -381,21 +381,53 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 }
 
 /*
- * We cannot currently handle tokens with rotated data.  We need a
- * generalized routine to rotate the data in place.  It is anticipated
- * that we won't encounter rotated data in the general case.
+ * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
+ * to do more than that, we shift repeatedly.  Kevin Coffman reports
+ * seeing 28 bytes as the value used by Microsoft clients and servers
+ * with AES, so this constant is chosen to allow handling 28 in one pass
+ * without using too much stack space.
+ *
+ * If that proves to be a problem, perhaps we could use a more clever
+ * algorithm.
  */
-static u32
-rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
+#define LOCAL_BUF_LEN 32u
+
+static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
 {
-       unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
+       char head[LOCAL_BUF_LEN];
+       char tmp[LOCAL_BUF_LEN];
+       unsigned int this_len, i;
+
+       BUG_ON(shift > LOCAL_BUF_LEN);
 
-       if (realrrc == 0)
-               return 0;
+       read_bytes_from_xdr_buf(buf, 0, head, shift);
+       for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
+               this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
+               read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
+               write_bytes_to_xdr_buf(buf, i, tmp, this_len);
+       }
+       write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
+}
 
-       dprintk("%s: cannot process token with rotated data: "
-               "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
-       return 1;
+static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
+{
+       int shifted = 0;
+       int this_shift;
+
+       shift %= buf->len;
+       while (shifted < shift) {
+               this_shift = min(shift - shifted, LOCAL_BUF_LEN);
+               rotate_buf_a_little(buf, this_shift);
+               shifted += this_shift;
+       }
+}
+
+static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
+{
+       struct xdr_buf subbuf;
+
+       xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
+       _rotate_left(&subbuf, shift);
 }
 
 static u32
@@ -495,11 +527,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 
        seqnum = be64_to_cpup((__be64 *)(ptr + 8));
 
-       if (rrc != 0) {
-               err = rotate_left(kctx, offset, buf, rrc);
-               if (err)
-                       return GSS_S_FAILURE;
-       }
+       if (rrc != 0)
+               rotate_left(offset + 16, buf, rrc);
 
        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
                                        &headskip, &tailskip);
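
The new rotate helpers shift the token in LOCAL_BUF_LEN-sized passes because xdr_buf data is scattered across head, pages and tail and has to be moved through small on-stack buffers; the outer loop then repeats the small rotation until the full rrc has been applied. A standalone sketch of the same chunked rotation on a flat byte array (for a flat array a single pass would already suffice; the chunking only pays off for scattered buffers):

#include <stdio.h>
#include <string.h>

#define LOCAL_BUF_LEN 32u

/* Rotate buf[0..len) left by shift bytes, shift <= LOCAL_BUF_LEN:
 * stash the head, slide the rest down, append the head at the tail. */
static void rotate_a_little(unsigned char *buf, size_t len, size_t shift)
{
	unsigned char head[LOCAL_BUF_LEN];

	memcpy(head, buf, shift);
	memmove(buf, buf + shift, len - shift);
	memcpy(buf + len - shift, head, shift);
}

/* Arbitrary left rotation built out of small passes, mirroring
 * _rotate_left() above: the stack cost stays bounded no matter how
 * large the rotation count is (rotations compose additively). */
static void rotate_left_by(unsigned char *buf, size_t len, size_t shift)
{
	size_t this_shift;

	shift %= len;
	while (shift) {
		this_shift = shift < LOCAL_BUF_LEN ? shift : LOCAL_BUF_LEN;
		rotate_a_little(buf, len, this_shift);
		shift -= this_shift;
	}
}

int main(void)
{
	unsigned char buf[] = "abcdefghij";

	rotate_left_by(buf, 10, 3);
	printf("%s\n", buf);	/* prints "defghijabc" */
	return 0;
}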
index 28b62dbb6d1e4be36358055a9c231143f0e45d9e..73e95738660042e7a9d4e7cb252143ec74078265 100644 (file)
@@ -336,7 +336,6 @@ struct rsc {
        struct svc_cred         cred;
        struct gss_svc_seq_data seqdata;
        struct gss_ctx          *mechctx;
-       char                    *client_name;
 };
 
 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -347,9 +346,7 @@ static void rsc_free(struct rsc *rsci)
        kfree(rsci->handle.data);
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
-       if (rsci->cred.cr_group_info)
-               put_group_info(rsci->cred.cr_group_info);
-       kfree(rsci->client_name);
+       free_svc_cred(&rsci->cred);
 }
 
 static void rsc_put(struct kref *ref)
@@ -387,7 +384,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
        tmp->handle.data = NULL;
        new->mechctx = NULL;
        new->cred.cr_group_info = NULL;
-       new->client_name = NULL;
+       new->cred.cr_principal = NULL;
 }
 
 static void
@@ -402,8 +399,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
        spin_lock_init(&new->seqdata.sd_lock);
        new->cred = tmp->cred;
        tmp->cred.cr_group_info = NULL;
-       new->client_name = tmp->client_name;
-       tmp->client_name = NULL;
+       new->cred.cr_principal = tmp->cred.cr_principal;
+       tmp->cred.cr_principal = NULL;
 }
 
 static struct cache_head *
@@ -501,8 +498,8 @@ static int rsc_parse(struct cache_detail *cd,
                /* get client name */
                len = qword_get(&mesg, buf, mlen);
                if (len > 0) {
-                       rsci.client_name = kstrdup(buf, GFP_KERNEL);
-                       if (!rsci.client_name)
+                       rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
+                       if (!rsci.cred.cr_principal)
                                goto out;
                }
 
@@ -932,16 +929,6 @@ struct gss_svc_data {
        struct rsc                      *rsci;
 };
 
-char *svc_gss_principal(struct svc_rqst *rqstp)
-{
-       struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
-
-       if (gd && gd->rsci)
-               return gd->rsci->client_name;
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_gss_principal);
-
 static int
 svcauth_gss_set_client(struct svc_rqst *rqstp)
 {
@@ -969,16 +956,17 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
 }
 
 static inline int
-gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi *rsip)
+gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+               struct xdr_netobj *out_handle, int *major_status)
 {
        struct rsc *rsci;
        int        rc;
 
-       if (rsip->major_status != GSS_S_COMPLETE)
+       if (*major_status != GSS_S_COMPLETE)
                return gss_write_null_verf(rqstp);
-       rsci = gss_svc_searchbyctx(cd, &rsip->out_handle);
+       rsci = gss_svc_searchbyctx(cd, out_handle);
        if (rsci == NULL) {
-               rsip->major_status = GSS_S_NO_CONTEXT;
+               *major_status = GSS_S_NO_CONTEXT;
                return gss_write_null_verf(rqstp);
        }
        rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
@@ -986,22 +974,13 @@ gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi
        return rc;
 }
 
-/*
- * Having read the cred already and found we're in the context
- * initiation case, read the verifier and initiate (or check the results
- * of) upcalls to userspace for help with context initiation.  If
- * the upcall results are available, write the verifier and result.
- * Otherwise, drop the request pending an answer to the upcall.
- */
-static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
-                       struct rpc_gss_wire_cred *gc, __be32 *authp)
+static inline int
+gss_read_verf(struct rpc_gss_wire_cred *gc,
+             struct kvec *argv, __be32 *authp,
+             struct xdr_netobj *in_handle,
+             struct xdr_netobj *in_token)
 {
-       struct kvec *argv = &rqstp->rq_arg.head[0];
-       struct kvec *resv = &rqstp->rq_res.head[0];
        struct xdr_netobj tmpobj;
-       struct rsi *rsip, rsikey;
-       int ret;
-       struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
 
        /* Read the verifier; should be NULL: */
        *authp = rpc_autherr_badverf;
@@ -1011,24 +990,67 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
                return SVC_DENIED;
        if (svc_getnl(argv) != 0)
                return SVC_DENIED;
-
        /* Marshal context handle and token for upcall: */
        *authp = rpc_autherr_badcred;
        if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
                return SVC_DENIED;
-       memset(&rsikey, 0, sizeof(rsikey));
-       if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
+       if (dup_netobj(in_handle, &gc->gc_ctx))
                return SVC_CLOSE;
        *authp = rpc_autherr_badverf;
        if (svc_safe_getnetobj(argv, &tmpobj)) {
-               kfree(rsikey.in_handle.data);
+               kfree(in_handle->data);
                return SVC_DENIED;
        }
-       if (dup_netobj(&rsikey.in_token, &tmpobj)) {
-               kfree(rsikey.in_handle.data);
+       if (dup_netobj(in_token, &tmpobj)) {
+               kfree(in_handle->data);
                return SVC_CLOSE;
        }
 
+       return 0;
+}
+
+static inline int
+gss_write_resv(struct kvec *resv, size_t size_limit,
+              struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
+              int major_status, int minor_status)
+{
+       if (resv->iov_len + 4 > size_limit)
+               return -1;
+       svc_putnl(resv, RPC_SUCCESS);
+       if (svc_safe_putnetobj(resv, out_handle))
+               return -1;
+       if (resv->iov_len + 3 * 4 > size_limit)
+               return -1;
+       svc_putnl(resv, major_status);
+       svc_putnl(resv, minor_status);
+       svc_putnl(resv, GSS_SEQ_WIN);
+       if (svc_safe_putnetobj(resv, out_token))
+               return -1;
+       return 0;
+}
+
+/*
+ * Having read the cred already and found we're in the context
+ * initiation case, read the verifier and initiate (or check the results
+ * of) upcalls to userspace for help with context initiation.  If
+ * the upcall results are available, write the verifier and result.
+ * Otherwise, drop the request pending an answer to the upcall.
+ */
+static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
+                       struct rpc_gss_wire_cred *gc, __be32 *authp)
+{
+       struct kvec *argv = &rqstp->rq_arg.head[0];
+       struct kvec *resv = &rqstp->rq_res.head[0];
+       struct rsi *rsip, rsikey;
+       int ret;
+       struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+
+       memset(&rsikey, 0, sizeof(rsikey));
+       ret = gss_read_verf(gc, argv, authp,
+                           &rsikey.in_handle, &rsikey.in_token);
+       if (ret)
+               return ret;
+
        /* Perform upcall, or find upcall result: */
        rsip = rsi_lookup(sn->rsi_cache, &rsikey);
        rsi_free(&rsikey);
@@ -1040,19 +1062,12 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 
        ret = SVC_CLOSE;
        /* Got an answer to the upcall; use it: */
-       if (gss_write_init_verf(sn->rsc_cache, rqstp, rsip))
-               goto out;
-       if (resv->iov_len + 4 > PAGE_SIZE)
+       if (gss_write_init_verf(sn->rsc_cache, rqstp,
+                               &rsip->out_handle, &rsip->major_status))
                goto out;
-       svc_putnl(resv, RPC_SUCCESS);
-       if (svc_safe_putnetobj(resv, &rsip->out_handle))
-               goto out;
-       if (resv->iov_len + 3 * 4 > PAGE_SIZE)
-               goto out;
-       svc_putnl(resv, rsip->major_status);
-       svc_putnl(resv, rsip->minor_status);
-       svc_putnl(resv, GSS_SEQ_WIN);
-       if (svc_safe_putnetobj(resv, &rsip->out_token))
+       if (gss_write_resv(resv, PAGE_SIZE,
+                          &rsip->out_handle, &rsip->out_token,
+                          rsip->major_status, rsip->minor_status))
                goto out;
 
        ret = SVC_COMPLETE;
@@ -1192,7 +1207,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
                }
                svcdata->rsci = rsci;
                cache_get(&rsci->h);
-               rqstp->rq_flavor = gss_svc_to_pseudoflavor(
+               rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
                                        rsci->mechctx->mech_type, gc->gc_svc);
                ret = SVC_OK;
                goto out;
index 04040476082e6efd5ef08f9c7e6444c0fec77929..21fde99e5c56e4ed6413d51f46a3945bff67f7d9 100644 (file)
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
-       wake_up(waitq);
+
+       if (waitq)
+               wake_up(waitq);
 }
 
 static void
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work)
        }
        dentry = dget(pipe->dentry);
        spin_unlock(&pipe->lock);
-       if (dentry) {
-               rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
-                              &free_list, destroy_msg, -ETIMEDOUT);
-               dput(dentry);
-       }
+       rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
+                       &free_list, destroy_msg, -ETIMEDOUT);
+       dput(dentry);
 }
 
 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
index 3c0653439f3dc398031301d53819c3b6e78bc4ef..92509ffe15fcacce5de331cbb205a84c4f718a86 100644 (file)
@@ -180,14 +180,16 @@ void rpcb_put_local(struct net *net)
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_clnt *clnt = sn->rpcb_local_clnt;
        struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
-       int shutdown;
+       int shutdown = 0;
 
        spin_lock(&sn->rpcb_clnt_lock);
-       if (--sn->rpcb_users == 0) {
-               sn->rpcb_local_clnt = NULL;
-               sn->rpcb_local_clnt4 = NULL;
+       if (sn->rpcb_users) {
+               if (--sn->rpcb_users == 0) {
+                       sn->rpcb_local_clnt = NULL;
+                       sn->rpcb_local_clnt4 = NULL;
+               }
+               shutdown = !sn->rpcb_users;
        }
-       shutdown = !sn->rpcb_users;
        spin_unlock(&sn->rpcb_clnt_lock);
 
        if (shutdown) {
index 017c0117d1543a784dfe5130396c74f80879131a..3ee7461926d8a01318ed2876e37651506c6b3c4b 100644 (file)
@@ -407,6 +407,14 @@ static int svc_uses_rpcbind(struct svc_serv *serv)
        return 0;
 }
 
+int svc_bind(struct svc_serv *serv, struct net *net)
+{
+       if (!svc_uses_rpcbind(serv))
+               return 0;
+       return svc_rpcb_setup(serv, net);
+}
+EXPORT_SYMBOL_GPL(svc_bind);
+
 /*
  * Create an RPC service
  */
@@ -471,15 +479,8 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
                spin_lock_init(&pool->sp_lock);
        }
 
-       if (svc_uses_rpcbind(serv)) {
-               if (svc_rpcb_setup(serv, current->nsproxy->net_ns) < 0) {
-                       kfree(serv->sv_pools);
-                       kfree(serv);
-                       return NULL;
-               }
-               if (!serv->sv_shutdown)
-                       serv->sv_shutdown = svc_rpcb_cleanup;
-       }
+       if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
+               serv->sv_shutdown = svc_rpcb_cleanup;
 
        return serv;
 }
@@ -536,8 +537,6 @@ EXPORT_SYMBOL_GPL(svc_shutdown_net);
 void
 svc_destroy(struct svc_serv *serv)
 {
-       struct net *net = current->nsproxy->net_ns;
-
        dprintk("svc: svc_destroy(%s, %d)\n",
                                serv->sv_program->pg_name,
                                serv->sv_nrthreads);
@@ -552,8 +551,6 @@ svc_destroy(struct svc_serv *serv)
 
        del_timer_sync(&serv->sv_temptimer);
 
-       svc_shutdown_net(serv, net);
-
        /*
         * The last user is gone and thus all sockets have to be destroyed to
         * the point. Check this.
@@ -1377,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
                                                sizeof(req->rq_snd_buf));
                return bc_send(req);
        } else {
-               /* Nothing to do to drop request */
+               /* drop request */
+               xprt_free_bc_request(req);
                return 0;
        }
 }
index b98ee35149121602b42ace9365bfd5f21e84767a..88f2bf671960d444e73d3d9eba2998f75ac2885b 100644 (file)
@@ -598,6 +598,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
+       BUG_ON(pages >= RPCSVC_MAXPAGES);
        for (i = 0; i < pages ; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
@@ -612,7 +613,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                        rqstp->rq_pages[i] = p;
                }
        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
-       BUG_ON(pages >= RPCSVC_MAXPAGES);
 
        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
@@ -973,7 +973,7 @@ void svc_close_net(struct svc_serv *serv, struct net *net)
        svc_clear_pools(serv, net);
        /*
         * At this point the sp_sockets lists will stay empty, since
-        * svc_enqueue will not add new entries without taking the
+        * svc_xprt_enqueue will not add new entries without taking the
         * sp_lock and checking XPT_BUSY.
         */
        svc_clear_list(&serv->sv_tempsocks, net);
index 71ec8530ec8cb7ac10abae2fbf6c2571b14c8c98..2777fa896645de3f063aa5ad67cb054bbb75a894 100644 (file)
@@ -347,17 +347,12 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
        return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
-
-void svcauth_unix_purge(void)
+void svcauth_unix_purge(struct net *net)
 {
-       struct net *net;
-
-       for_each_net(net) {
-               struct sunrpc_net *sn;
+       struct sunrpc_net *sn;
 
-               sn = net_generic(net, sunrpc_net_id);
-               cache_purge(sn->ip_map_cache);
-       }
+       sn = net_generic(net, sunrpc_net_id);
+       cache_purge(sn->ip_map_cache);
 }
 EXPORT_SYMBOL_GPL(svcauth_unix_purge);
 
@@ -751,6 +746,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
        struct svc_cred *cred = &rqstp->rq_cred;
 
        cred->cr_group_info = NULL;
+       cred->cr_principal = NULL;
        rqstp->rq_client = NULL;
 
        if (argv->iov_len < 3*4)
@@ -778,7 +774,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);
 
-       rqstp->rq_flavor = RPC_AUTH_NULL;
+       rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
        return SVC_OK;
 }
 
@@ -816,6 +812,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
        int             len   = argv->iov_len;
 
        cred->cr_group_info = NULL;
+       cred->cr_principal = NULL;
        rqstp->rq_client = NULL;
 
        if ((len -= 3*4) < 0)
@@ -852,7 +849,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);
 
-       rqstp->rq_flavor = RPC_AUTH_UNIX;
+       rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
        return SVC_OK;
 
 badcred:
index 61ceae0b956607cf0b7fa88b61f22bc79b1f673f..a157a2e64e18de17e33ae1c515e5b1501553b256 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config WAN_ROUTER
-       tristate "WAN router"
+       tristate "WAN router (DEPRECATED)"
        depends on EXPERIMENTAL
        ---help---
          Wide Area Networks (WANs), such as X.25, frame relay and leased
index d2a19b0ff71f134544ac1afc65dd5d3fe3bc4891..89baa3328411485ac4748aa9e0f9d097fdd30a97 100644 (file)
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
+       wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_upload_connect_keys(wdev);
 
        nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
        ev = kzalloc(sizeof(*ev), gfp);
        if (!ev)
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.channel = params->channel;
 #endif
+       wdev->sme_state = CFG80211_SME_CONNECTING;
        err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
+               wdev->sme_state = CFG80211_SME_IDLE;
                return err;
        }
 
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
        }
 
        wdev->current_bss = NULL;
+       wdev->sme_state = CFG80211_SME_IDLE;
        wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
        if (!nowext)
index 15f347477a9953fb85acc494436cd2763cd266a9..baf5704740ee62080e8dc4a16de8cb71503b7669 100644 (file)
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
        spin_unlock(&reg_requests_lock);
 
        if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-               cancel_delayed_work_sync(&reg_timeout);
+               cancel_delayed_work(&reg_timeout);
 
        if (need_more_processing)
                schedule_work(&reg_work);
index 55d99466babb1d8060dc09d841315e9d366ecc21..316cfd00914fe8df7d6510b13aeaf4165a9d6d88 100644 (file)
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype) {
+       if (ntype != otype && netif_running(dev)) {
                err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
                                                    ntype);
                if (err)
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                  enum nl80211_iftype iftype)
 {
        struct wireless_dev *wdev_iter;
+       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        int total = 1;
        int i, j;
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
+               used_iftypes |= BIT(wdev_iter->iftype);
        }
        mutex_unlock(&rdev->devlist_mtx);
 
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
        for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
                const struct ieee80211_iface_combination *c;
                struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
 
                c = &rdev->wiphy.iface_combinations[i];
 
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                        if (rdev->wiphy.software_iftypes & BIT(iftype))
                                continue;
                        for (j = 0; j < c->n_limits; j++) {
+                               all_iftypes |= limits[j].types;
                                if (!(limits[j].types & BIT(iftype)))
                                        continue;
                                if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                limits[j].max -= num[iftype];
                        }
                }
-               /* yay, it fits */
+
+               /*
+                * Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /*
+                * This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
                kfree(limits);
                return 0;
  cont:
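
The extra bookkeeping above records which interface types are currently in use (used_iftypes) and which types a candidate combination can express at all (all_iftypes); a combination is acceptable only when the first set is a subset of the second. A tiny standalone illustration of that subset test on bitmasks:

#include <stdint.h>
#include <stdio.h>

/* A combination "covers" the interfaces in use iff every bit of
 * used_iftypes also appears in all_iftypes. */
static int combination_covers(uint32_t all_iftypes, uint32_t used_iftypes)
{
	return (all_iftypes & used_iftypes) == used_iftypes;
}

int main(void)
{
	uint32_t used = (1u << 2) | (1u << 3);	/* e.g. two iftypes in use */

	printf("%d\n", combination_covers((1u << 2) | (1u << 3), used)); /* 1 */
	printf("%d\n", combination_covers(1u << 2, used));               /* 0 */
	return 0;
}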
index c53e8f42aa7506b897464a3716e282b0c837a549..ccfbd328a69d7948736157555bfa857236875156 100644 (file)
@@ -1921,6 +1921,9 @@ no_transform:
        }
 ok:
        xfrm_pols_put(pols, drop_pols);
+       if (dst && dst->xfrm &&
+           dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+               dst->flags |= DST_XFRM_TUNNEL;
        return dst;
 
 nopol:
index faea0ec612bfed2932ca5dc25868fe00888a5afc..e5bd60ff48e3d553ef3921256123519df7b812b8 100755 (executable)
@@ -2382,6 +2382,19 @@ sub process {
                        }
                }
 
+               if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
+                       my $orig = $1;
+                       my $level = lc($orig);
+                       $level = "warn" if ($level eq "warning");
+                       WARN("PREFER_PR_LEVEL",
+                            "Prefer pr_$level(... to printk(KERN_$1, ...\n" . $herecurr);
+               }
+
+               if ($line =~ /\bpr_warning\s*\(/) {
+                       WARN("PREFER_PR_LEVEL",
+                            "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+               }
+
 # function brace can't be on same line, except for #defines of do while,
 # or if closed on same line
                if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
@@ -2448,6 +2461,13 @@ sub process {
                                     "space prohibited between function name and open parenthesis '('\n" . $herecurr);
                        }
                }
+
+# check for whitespace before a non-naked semicolon
+               if ($line =~ /^\+.*\S\s+;/) {
+                       CHK("SPACING",
+                           "space prohibited before semicolon\n" . $herecurr);
+               }
+
 # Check operator spacing.
                if (!($line=~/\#\s*include/)) {
                        my $ops = qr{
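
The two checks added above steer authors from printk(KERN_<LEVEL> ...) and pr_warning() toward the pr_<level>() helpers, with KERN_WARNING mapping to pr_warn(). An illustrative kernel-code fragment of the spellings the warning targets and their preferred replacements:

#include <linux/printk.h>

static void report_temp(int celsius)
{
	/* Flagged by PREFER_PR_LEVEL: */
	printk(KERN_WARNING "temp high: %d C\n", celsius);
	pr_warning("temp high: %d C\n", celsius);

	/* Preferred spellings: */
	pr_warn("temp high: %d C\n", celsius);
	pr_info("temp now: %d C\n", celsius);
}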
index 0948c6b5a32158cc4a28736d1c6924a4b79beded..8b673dd4627fb20f4ab16a8af6de9524bd84a426 100755 (executable)
@@ -83,6 +83,8 @@ push(@signature_tags, "Signed-off-by:");
 push(@signature_tags, "Reviewed-by:");
 push(@signature_tags, "Acked-by:");
 
+my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
 my $rfc822_char = '[\\000-\\377]';
@@ -473,7 +475,6 @@ my @subsystem = ();
 my @status = ();
 my %deduplicate_name_hash = ();
 my %deduplicate_address_hash = ();
-my $signature_pattern;
 
 my @maintainers = get_maintainers();
 
old mode 100644 (file)
new mode 100755 (executable)
index 032daab449b0bb3007562e795593a15d247a2c58..8ea39aabe94889a224c868757196afc084c578b8 100644 (file)
@@ -490,17 +490,9 @@ static int common_mmap(int op, struct file *file, unsigned long prot,
        return common_file_perm(op, file, mask);
 }
 
-static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
-                             unsigned long prot, unsigned long flags,
-                             unsigned long addr, unsigned long addr_only)
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+                             unsigned long prot, unsigned long flags)
 {
-       int rc = 0;
-
-       /* do DAC check */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
-
        return common_mmap(OP_FMMAP, file, prot, flags);
 }
 
@@ -646,7 +638,8 @@ static struct security_operations apparmor_ops = {
        .file_permission =              apparmor_file_permission,
        .file_alloc_security =          apparmor_file_alloc_security,
        .file_free_security =           apparmor_file_free_security,
-       .file_mmap =                    apparmor_file_mmap,
+       .mmap_file =                    apparmor_mmap_file,
+       .mmap_addr =                    cap_mmap_addr,
        .file_mprotect =                apparmor_file_mprotect,
        .file_lock =                    apparmor_file_lock,
 
index fca889676c5e9e5726f6c3136fb8c26cfe85f63c..61095df8b89ac452d50528144a67dca751d4f992 100644 (file)
@@ -949,7 +949,8 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, file_alloc_security);
        set_to_cap_if_null(ops, file_free_security);
        set_to_cap_if_null(ops, file_ioctl);
-       set_to_cap_if_null(ops, file_mmap);
+       set_to_cap_if_null(ops, mmap_addr);
+       set_to_cap_if_null(ops, mmap_file);
        set_to_cap_if_null(ops, file_mprotect);
        set_to_cap_if_null(ops, file_lock);
        set_to_cap_if_null(ops, file_fcntl);
index e771cb1b2d7947f0c85651b38cc7c9c1d3da11d7..6dbae4650abe20208ff66eb27015e21b964d0344 100644 (file)
@@ -958,22 +958,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
 }
 
 /*
- * cap_file_mmap - check if able to map given addr
- * @file: unused
- * @reqprot: unused
- * @prot: unused
- * @flags: unused
+ * cap_mmap_addr - check if able to map given addr
  * @addr: address attempting to be mapped
- * @addr_only: unused
  *
  * If the process is attempting to map memory below dac_mmap_min_addr they need
  * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
  * capability security module.  Returns 0 if this mapping should be allowed
  * -EPERM if not.
  */
-int cap_file_mmap(struct file *file, unsigned long reqprot,
-                 unsigned long prot, unsigned long flags,
-                 unsigned long addr, unsigned long addr_only)
+int cap_mmap_addr(unsigned long addr)
 {
        int ret = 0;
 
@@ -986,3 +979,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
        }
        return ret;
 }
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+                 unsigned long prot, unsigned long flags)
+{
+       return 0;
+}
index fab4f8dda6c6fdf6acdebd1385bb39caae4c8b97..c92d42b021aa47c62dea181c6b90129d80ff7e53 100644 (file)
@@ -38,7 +38,7 @@ long compat_keyctl_instantiate_key_iov(
 
        ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
                                           ARRAY_SIZE(iovstack),
-                                          iovstack, &iov, 1);
+                                          iovstack, &iov);
        if (ret < 0)
                return ret;
        if (ret == 0)
index f711b094ed412e723207e5d75ceb86a0c81e4439..3dcbf86b0d31b9c7c9c889c80dacf34f9b33adc6 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/sched.h>
 #include <linux/key-type.h>
+#include <linux/task_work.h>
 
 #ifdef __KDEBUG
 #define kenter(FMT, ...) \
@@ -148,6 +149,7 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
 #define KEY_LOOKUP_FOR_UNLINK  0x04
 
 extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct task_work *twork);
 
 extern struct work_struct key_gc_work;
 extern unsigned key_gc_delay;
index ddb3e05bc5fcd12fae86cf9c60edf6a1f57439c1..0f5b3f0272995dc7057f1306c346468174d3e464 100644 (file)
@@ -84,7 +84,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
        vm = false;
        if (_payload) {
                ret = -ENOMEM;
-               payload = kmalloc(plen, GFP_KERNEL);
+               payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
                if (!payload) {
                        if (plen <= PAGE_SIZE)
                                goto error2;
@@ -1110,7 +1110,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
                goto no_payload;
 
        ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+                                   ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret < 0)
                return ret;
        if (ret == 0)
@@ -1454,50 +1454,57 @@ long keyctl_get_security(key_serial_t keyid,
  */
 long keyctl_session_to_parent(void)
 {
-#ifdef TIF_NOTIFY_RESUME
        struct task_struct *me, *parent;
        const struct cred *mycred, *pcred;
-       struct cred *cred, *oldcred;
+       struct task_work *newwork, *oldwork;
        key_ref_t keyring_r;
+       struct cred *cred;
        int ret;
 
        keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
        if (IS_ERR(keyring_r))
                return PTR_ERR(keyring_r);
 
+       ret = -ENOMEM;
+       newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL);
+       if (!newwork)
+               goto error_keyring;
+
        /* our parent is going to need a new cred struct, a new tgcred struct
         * and new security data, so we allocate them here to prevent ENOMEM in
         * our parent */
-       ret = -ENOMEM;
        cred = cred_alloc_blank();
        if (!cred)
-               goto error_keyring;
+               goto error_newwork;
 
        cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
-       keyring_r = NULL;
+       init_task_work(newwork, key_change_session_keyring, cred);
 
        me = current;
        rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
-       parent = me->real_parent;
        ret = -EPERM;
+       oldwork = NULL;
+       parent = me->real_parent;
 
        /* the parent mustn't be init and mustn't be a kernel thread */
        if (parent->pid <= 1 || !parent->mm)
-               goto not_permitted;
+               goto unlock;
 
        /* the parent must be single threaded */
        if (!thread_group_empty(parent))
-               goto not_permitted;
+               goto unlock;
 
        /* the parent and the child must have different session keyrings or
         * there's no point */
        mycred = current_cred();
        pcred = __task_cred(parent);
        if (mycred == pcred ||
-           mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
-               goto already_same;
+           mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
+               ret = 0;
+               goto unlock;
+       }
 
        /* the parent must have the same effective ownership and mustn't be
         * SUID/SGID */
@@ -1507,50 +1514,40 @@ long keyctl_session_to_parent(void)
            pcred->gid  != mycred->egid ||
            pcred->egid != mycred->egid ||
            pcred->sgid != mycred->egid)
-               goto not_permitted;
+               goto unlock;
 
        /* the keyrings must have the same UID */
        if ((pcred->tgcred->session_keyring &&
             pcred->tgcred->session_keyring->uid != mycred->euid) ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
-               goto not_permitted;
+               goto unlock;
 
-       /* if there's an already pending keyring replacement, then we replace
-        * that */
-       oldcred = parent->replacement_session_keyring;
+       /* cancel an already pending keyring replacement */
+       oldwork = task_work_cancel(parent, key_change_session_keyring);
 
        /* the replacement session keyring is applied just prior to userspace
         * restarting */
-       parent->replacement_session_keyring = cred;
-       cred = NULL;
-       set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
-
-       write_unlock_irq(&tasklist_lock);
-       rcu_read_unlock();
-       if (oldcred)
-               put_cred(oldcred);
-       return 0;
-
-already_same:
-       ret = 0;
-not_permitted:
+       ret = task_work_add(parent, newwork, true);
+       if (!ret)
+               newwork = NULL;
+unlock:
        write_unlock_irq(&tasklist_lock);
        rcu_read_unlock();
-       put_cred(cred);
+       if (oldwork) {
+               put_cred(oldwork->data);
+               kfree(oldwork);
+       }
+       if (newwork) {
+               put_cred(newwork->data);
+               kfree(newwork);
+       }
        return ret;
 
+error_newwork:
+       kfree(newwork);
 error_keyring:
        key_ref_put(keyring_r);
        return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
-       /*
-        * To be removed when TIF_NOTIFY_RESUME has been implemented on
-        * m68k/xtensa
-        */
-#warning TIF_NOTIFY_RESUME not implemented
-       return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
 }
 
 /*
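
The rewrite above drops the old replacement_session_keyring field in favour of the generic task_work mechanism: the child allocates a task_work carrying the prepared cred, cancels any previously queued replacement with task_work_cancel(), and queues the new one with task_work_add() so the parent applies it on its way back to userspace. A condensed sketch of just that hand-off, using the same calls as the hunk (permission checks and locking elided; queue_keyring_replacement is an illustrative name, not a kernel function):

/* Sketch only: the task_work hand-off from the hunk above. */
static int queue_keyring_replacement(struct task_struct *parent,
				     struct cred *prepared_cred)
{
	struct task_work *newwork, *oldwork;
	int ret;

	newwork = kmalloc(sizeof(*newwork), GFP_KERNEL);
	if (!newwork) {
		put_cred(prepared_cred);
		return -ENOMEM;
	}
	init_task_work(newwork, key_change_session_keyring, prepared_cred);

	/* drop any replacement that is already queued for the parent */
	oldwork = task_work_cancel(parent, key_change_session_keyring);

	ret = task_work_add(parent, newwork, true);
	if (ret == 0)
		newwork = NULL;		/* now owned by the parent */

	/* any cancelled or unqueued work still holds a cred reference */
	if (oldwork) {
		put_cred(oldwork->data);
		kfree(oldwork);
	}
	if (newwork) {
		put_cred(newwork->data);
		kfree(newwork);
	}
	return ret;
}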
index d71056db7b67501a085fd4a8feda5c841dd83094..4ad54eea1ea45554d5d931497671fdb32a33660b 100644 (file)
@@ -834,23 +834,17 @@ error:
  * Replace a process's session keyring on behalf of one of its children when
 * the target process is about to resume userspace execution.
  */
-void key_replace_session_keyring(void)
+void key_change_session_keyring(struct task_work *twork)
 {
-       const struct cred *old;
-       struct cred *new;
-
-       if (!current->replacement_session_keyring)
-               return;
+       const struct cred *old = current_cred();
+       struct cred *new = twork->data;
 
-       write_lock_irq(&tasklist_lock);
-       new = current->replacement_session_keyring;
-       current->replacement_session_keyring = NULL;
-       write_unlock_irq(&tasklist_lock);
-
-       if (!new)
+       kfree(twork);
+       if (unlikely(current->flags & PF_EXITING)) {
+               put_cred(new);
                return;
+       }
 
-       old = current_cred();
        new->  uid      = old->  uid;
        new-> euid      = old-> euid;
        new-> suid      = old-> suid;
index cc3790315d2f15778fcc3c8a3ec7f2434282277d..000e7501752022089b82efeb153115498e55da60 100644 (file)
@@ -93,16 +93,9 @@ static void umh_keys_cleanup(struct subprocess_info *info)
 static int call_usermodehelper_keys(char *path, char **argv, char **envp,
                                        struct key *session_keyring, int wait)
 {
-       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-       struct subprocess_info *info =
-               call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-       if (!info)
-               return -ENOMEM;
-
-       call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
-                                       key_get(session_keyring));
-       return call_usermodehelper_exec(info, wait);
+       return call_usermodehelper_fns(path, argv, envp, wait,
+                                      umh_keys_init, umh_keys_cleanup,
+                                      key_get(session_keyring));
 }
 
 /*
index 5497a57fba0154a24b1b87835930e6cc685f855b..860aeb349cb337bbccf4346d99120d4d1fd51c90 100644 (file)
 #include <linux/ima.h>
 #include <linux/evm.h>
 #include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
@@ -657,18 +661,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return security_ops->file_ioctl(file, cmd, arg);
 }
 
-int security_file_mmap(struct file *file, unsigned long reqprot,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long addr, unsigned long addr_only)
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
 {
-       int ret;
+       /*
+        * Do we have PROT_READ and does the application expect
+        * it to imply PROT_EXEC?  If not, nothing to talk about...
+        */
+       if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+               return prot;
+       if (!(current->personality & READ_IMPLIES_EXEC))
+               return prot;
+       /*
+        * if that's an anonymous mapping, let it.
+        */
+       if (!file)
+               return prot | PROT_EXEC;
+       /*
+        * ditto if it's not on noexec mount, except that on !MMU we need
+        * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case
+        */
+       if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+#ifndef CONFIG_MMU
+               unsigned long caps = 0;
+               struct address_space *mapping = file->f_mapping;
+               if (mapping && mapping->backing_dev_info)
+                       caps = mapping->backing_dev_info->capabilities;
+               if (!(caps & BDI_CAP_EXEC_MAP))
+                       return prot;
+#endif
+               return prot | PROT_EXEC;
+       }
+       /* anything on noexec mount won't get PROT_EXEC */
+       return prot;
+}
 
-       ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+                       unsigned long flags)
+{
+       int ret;
+       ret = security_ops->mmap_file(file, prot,
+                                       mmap_prot(file, prot), flags);
        if (ret)
                return ret;
        return ima_file_mmap(file, prot);
 }
 
+int security_mmap_addr(unsigned long addr)
+{
+       return security_ops->mmap_addr(addr);
+}
+
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
                            unsigned long prot)
 {
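
mmap_prot() above folds the READ_IMPLIES_EXEC personality into the protection bits the LSM hook sees: a plain PROT_READ request gains PROT_EXEC unless the file sits on a noexec mount (anonymous mappings always gain it, and !MMU additionally requires BDI_CAP_EXEC_MAP). A standalone userspace sketch of just the decision table, with the personality flag and the mount check reduced to plain parameters:

#include <stdio.h>
#include <sys/mman.h>

/* Decide which PROT_* bits a policy check should see, given the
 * caller's request.  read_implies_exec and on_noexec_mount stand in
 * for the personality flag and the mount test in the kernel version;
 * the anonymous-mapping and !MMU cases are omitted here. */
static unsigned long effective_prot(unsigned long prot,
				    int read_implies_exec,
				    int on_noexec_mount)
{
	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
		return prot;		/* not a plain-read request */
	if (!read_implies_exec)
		return prot;		/* personality does not apply */
	if (on_noexec_mount)
		return prot;		/* noexec mounts never gain EXEC */
	return prot | PROT_EXEC;
}

int main(void)
{
	printf("%#lx\n", effective_prot(PROT_READ, 1, 0));	/* gains PROT_EXEC */
	printf("%#lx\n", effective_prot(PROT_READ, 1, 1));	/* stays PROT_READ */
	return 0;
}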
index fa2341b683314b0c5505f905e6712538555300ad..ffd8900a38e8d7ddb906c6f090bacd1f6e419907 100644 (file)
@@ -2717,7 +2717,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
                        ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
                return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-       if (ia_valid & ATTR_SIZE)
+       if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
                av |= FILE__OPEN;
 
        return dentry_has_perm(cred, dentry, av);
@@ -3083,9 +3083,7 @@ error:
        return rc;
 }
 
-static int selinux_file_mmap(struct file *file, unsigned long reqprot,
-                            unsigned long prot, unsigned long flags,
-                            unsigned long addr, unsigned long addr_only)
+static int selinux_mmap_addr(unsigned long addr)
 {
        int rc = 0;
        u32 sid = current_sid();
@@ -3104,10 +3102,12 @@ static int selinux_file_mmap(struct file *file, unsigned long reqprot,
        }
 
        /* do DAC check on address space usage */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
+       return cap_mmap_addr(addr);
+}
 
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+                            unsigned long prot, unsigned long flags)
+{
        if (selinux_checkreqprot)
                prot = reqprot;
 
@@ -5570,7 +5570,8 @@ static struct security_operations selinux_ops = {
        .file_alloc_security =          selinux_file_alloc_security,
        .file_free_security =           selinux_file_free_security,
        .file_ioctl =                   selinux_file_ioctl,
-       .file_mmap =                    selinux_file_mmap,
+       .mmap_file =                    selinux_mmap_file,
+       .mmap_addr =                    selinux_mmap_addr,
        .file_mprotect =                selinux_file_mprotect,
        .file_lock =                    selinux_file_lock,
        .file_fcntl =                   selinux_file_fcntl,
index b8c53723e09bfe7d6c211bc05df35793ae9ac8b2..df2de54a958debfab56caf3483cd5d93856e5be4 100644 (file)
@@ -145,7 +145,9 @@ struct security_class_mapping secclass_map[] = {
            "node_bind", "name_connect", NULL } },
        { "memprotect", { "mmap_zero", NULL } },
        { "peer", { "recv", NULL } },
-       { "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
+       { "capability2",
+         { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+           NULL } },
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
          { COMMON_SOCK_PERMS, NULL } },
index 4e93f9ef970b25a78bca26ab2a49962024b3cb50..3ad2902512888282e299b64434a7790d9788e060 100644 (file)
@@ -1259,12 +1259,8 @@ static int sel_make_bools(void)
                if (!inode)
                        goto out;
 
-               ret = -EINVAL;
-               len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
-               if (len < 0)
-                       goto out;
-
                ret = -ENAMETOOLONG;
+               len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
                if (len >= PAGE_SIZE)
                        goto out;
 
@@ -1557,19 +1553,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
 static ssize_t sel_read_class(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       ssize_t rc, len;
-       char *page;
        unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-       page = (char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
-       rc = simple_read_from_buffer(buf, count, ppos, page, len);
-       free_page((unsigned long)page);
-
-       return rc;
+       char res[TMPBUFLEN];
+       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+       return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_class_ops = {
@@ -1580,19 +1567,10 @@ static const struct file_operations sel_class_ops = {
 static ssize_t sel_read_perm(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       ssize_t rc, len;
-       char *page;
        unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-       page = (char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
-       rc = simple_read_from_buffer(buf, count, ppos, page, len);
-       free_page((unsigned long)page);
-
-       return rc;
+       char res[TMPBUFLEN];
+       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+       return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_perm_ops = {
index d583c054580889eff6f4e9080110aad7ae0370d1..ee0bb5735f35c98d6edfa7bb4c9590ef2a234cc4 100644 (file)
@@ -1171,7 +1171,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
 }
 
 /**
- * smack_file_mmap :
+ * smack_mmap_file :
  * Check permissions for a mmap operation.  The @file may be NULL, e.g.
  * if mapping anonymous memory.
  * @file contains the file structure for file to map (may be NULL).
@@ -1180,10 +1180,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
  * @flags contains the operational flags.
  * Return 0 if permission is granted.
  */
-static int smack_file_mmap(struct file *file,
+static int smack_mmap_file(struct file *file,
                           unsigned long reqprot, unsigned long prot,
-                          unsigned long flags, unsigned long addr,
-                          unsigned long addr_only)
+                          unsigned long flags)
 {
        struct smack_known *skp;
        struct smack_rule *srp;
@@ -1198,11 +1197,6 @@ static int smack_file_mmap(struct file *file,
        int tmay;
        int rc;
 
-       /* do DAC check on address space usage */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
-
        if (file == NULL || file->f_dentry == NULL)
                return 0;
 
@@ -3482,7 +3476,8 @@ struct security_operations smack_ops = {
        .file_ioctl =                   smack_file_ioctl,
        .file_lock =                    smack_file_lock,
        .file_fcntl =                   smack_file_fcntl,
-       .file_mmap =                    smack_file_mmap,
+       .mmap_file =                    smack_mmap_file,
+       .mmap_addr =                    cap_mmap_addr,
        .file_set_fowner =              smack_file_set_fowner,
        .file_send_sigiotask =          smack_file_send_sigiotask,
        .file_receive =                 smack_file_receive,
index a68aed7fce0205462ce08183f560015eeb0060cb..ec2118d0e27aca3f5fef6c2ddd72f8b166ce98ca 100644 (file)
@@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream)
        if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
                return -EPERM;
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
-       if (!retval) {
+       if (!retval)
                stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
-               wake_up(&stream->runtime->sleep);
-       }
        return retval;
 }
 
@@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
        if (!retval) {
                stream->runtime->state = SNDRV_PCM_STATE_SETUP;
                wake_up(&stream->runtime->sleep);
+               stream->runtime->hw_pointer = 0;
+               stream->runtime->app_pointer = 0;
+               stream->runtime->total_bytes_available = 0;
+               stream->runtime->total_bytes_transferred = 0;
        }
        return retval;
 }
index 582aace20ea31357b94a660e2607da4b058ad228..7eca25fae4137947ebf933451e2d2964d2bd9d40 100644 (file)
@@ -37,8 +37,8 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips");
 MODULE_LICENSE("GPL");
 
-#define FREQ_LO                 (76U * 16000)
-#define FREQ_HI                (108U * 16000)
+#define FREQ_LO                ((tea->tea5759 ? 760 :  875) * 1600U)
+#define FREQ_HI                ((tea->tea5759 ? 910 : 1080) * 1600U)
 
 /*
  * definitions
@@ -120,9 +120,9 @@ static u32 snd_tea575x_read(struct snd_tea575x *tea)
        return data;
 }
 
-static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+static u32 snd_tea575x_val_to_freq(struct snd_tea575x *tea, u32 val)
 {
-       u32 freq = snd_tea575x_read(tea) & TEA575X_BIT_FREQ_MASK;
+       u32 freq = val & TEA575X_BIT_FREQ_MASK;
 
        if (freq == 0)
                return freq;
@@ -139,6 +139,11 @@ static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
        return clamp(freq * 16, FREQ_LO, FREQ_HI); /* from kHz */
 }
 
+static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+{
+       return snd_tea575x_val_to_freq(tea, snd_tea575x_read(tea));
+}
+
 static void snd_tea575x_set_freq(struct snd_tea575x *tea)
 {
        u32 freq = tea->freq;
@@ -156,6 +161,7 @@ static void snd_tea575x_set_freq(struct snd_tea575x *tea)
        tea->val &= ~TEA575X_BIT_FREQ_MASK;
        tea->val |= freq & TEA575X_BIT_FREQ_MASK;
        snd_tea575x_write(tea, tea->val);
+       tea->freq = snd_tea575x_val_to_freq(tea, tea->val);
 }
 
 /*
@@ -317,7 +323,6 @@ static int tea575x_s_ctrl(struct v4l2_ctrl *ctrl)
 }
 
 static const struct v4l2_file_operations tea575x_fops = {
-       .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
@@ -337,7 +342,6 @@ static const struct v4l2_ioctl_ops tea575x_ioctl_ops = {
 };
 
 static const struct video_device tea575x_radio = {
-       .fops           = &tea575x_fops,
        .ioctl_ops      = &tea575x_ioctl_ops,
        .release        = video_device_release_empty,
 };
@@ -349,7 +353,7 @@ static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
 /*
  * initialize all the tea575x chips
  */
-int snd_tea575x_init(struct snd_tea575x *tea)
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
 {
        int retval;
 
@@ -374,6 +378,9 @@ int snd_tea575x_init(struct snd_tea575x *tea)
        tea->vd.lock = &tea->mutex;
        tea->vd.v4l2_dev = tea->v4l2_dev;
        tea->vd.ctrl_handler = &tea->ctrl_handler;
+       tea->fops = tea575x_fops;
+       tea->fops.owner = owner;
+       tea->vd.fops = &tea->fops;
        set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
        /* disable hw_freq_seek if we can't use it */
        if (tea->cannot_read_data)
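The FREQ_LO/FREQ_HI change at the top of this file switches from one fixed FM band to per-variant limits, expressed in the 1/16 kHz units the clamp above ("/* from kHz */") implies: a band edge in MHz maps to MHz * 16000 units, which is exactly the (band-edge-in-100-kHz * 1600U) form of the new macros. A small stand-alone sketch of that conversion, checking the four band edges used here (87.5-108 MHz for the TEA5757 case, 76-91 MHz for the TEA5759 case):

    #include <stdio.h>

    /* Frequencies here are in 1/16 kHz units, i.e. 62.5 Hz per unit. */
    static unsigned int mhz_to_units(double mhz)
    {
            return (unsigned int)(mhz * 16000.0);
    }

    int main(void)
    {
            printf("87.5 MHz  = %u units\n", mhz_to_units(87.5));  /* 875 * 1600  = FREQ_LO (5757) */
            printf("108.0 MHz = %u units\n", mhz_to_units(108.0)); /* 1080 * 1600 = FREQ_HI (5757) */
            printf("76.0 MHz  = %u units\n", mhz_to_units(76.0));  /* 760 * 1600  = FREQ_LO (5759) */
            printf("91.0 MHz  = %u units\n", mhz_to_units(91.0));  /* 910 * 1600  = FREQ_HI (5759) */
            return 0;
    }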
index 67f47d891959661c326902cf5fa7803d39a09be0..52b5c0bf90c12a640b02deb844c58c0254a8c73c 100644 (file)
@@ -2769,7 +2769,7 @@ static int __devinit snd_es1968_create(struct snd_card *card,
        chip->tea.ops = &snd_es1968_tea_ops;
        strlcpy(chip->tea.card, "SF64-PCE2", sizeof(chip->tea.card));
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
-       if (!snd_tea575x_init(&chip->tea))
+       if (!snd_tea575x_init(&chip->tea, THIS_MODULE))
                printk(KERN_INFO "es1968: detected TEA575x radio\n");
 #endif
 
index f696623227503763d9996589034244314f39568c..b32e8024ea86c017812fc517c6d246daec475c73 100644 (file)
@@ -1254,7 +1254,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
        if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-               if (snd_tea575x_init(&chip->tea)) {
+               if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
                        snd_fm801_free(chip);
                        return -ENODEV;
@@ -1263,7 +1263,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                /* autodetect tuner connection */
                for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
                        chip->tea575x_tuner = tea575x_tuner;
-                       if (!snd_tea575x_init(&chip->tea)) {
+                       if (!snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                                snd_printk(KERN_INFO "detected TEA575x radio type %s\n",
                                           get_tea575x_gpio(chip)->name);
                                break;
index 163b6b5de3eb535fe972e145b6ed35f0182590fa..d03079764189dedef68484f72b649aabd460006c 100644 (file)
@@ -97,19 +97,6 @@ config SND_HDA_CODEC_REALTEK
          snd-hda-codec-realtek.
          This module is automatically loaded at probing.
 
-config SND_HDA_ENABLE_REALTEK_QUIRKS
-       bool "Build static quirks for Realtek codecs"
-       depends on SND_HDA_CODEC_REALTEK
-       default y
-       help
-         Say Y here to build the static quirks codes for Realtek codecs.
-         If you need the "model" preset that the default BIOS auto-parser
-         can't handle, turn this option on.
-
-         If your device works with model=auto option, basically you don't
-         need the quirk code.  By turning this off, you can reduce the
-         module size quite a lot.
-
 config SND_HDA_CODEC_ANALOG
        bool "Build Analog Device HD-audio codec support"
        default y
index 6e9ef3e250935326ccf5d4583760dadc8091be20..f7520b9f909cfb0d577ed745bef272675b23477a 100644 (file)
@@ -618,7 +618,6 @@ int snd_hda_gen_add_verbs(struct hda_gen_spec *spec,
                          const struct hda_verb *list)
 {
        const struct hda_verb **v;
-       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
        v = snd_array_new(&spec->verbs);
        if (!v)
                return -ENOMEM;
index 2a7889dfbd1b53aff68dfff49f554d6f7e0fec84..632ad0ad3007491b827f7e2e3edcb87dba280c6e 100644 (file)
@@ -157,4 +157,14 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
                        const struct snd_pci_quirk *quirk,
                        const struct hda_fixup *fixlist);
 
+static inline void snd_hda_gen_init(struct hda_gen_spec *spec)
+{
+       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
+}
+
+static inline void snd_hda_gen_free(struct hda_gen_spec *spec)
+{
+       snd_array_free(&spec->verbs);
+}
+
 #endif /* __SOUND_HDA_AUTO_PARSER_H */
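The two inline helpers added here give codec drivers a single place to set up and tear down the generic-parser verb array; the snd_array_init() call removed from snd_hda_gen_add_verbs() in the previous hunk moves into snd_hda_gen_init(), and the Conexant and Realtek hunks further down add the matching calls. A hedged sketch of the intended pairing in a codec driver follows; the struct and callback names are illustrative only, not part of this patch:

    /* Sketch: a spec embedding struct hda_gen_spec as a member. */
    struct my_spec {
            struct hda_gen_spec gen;
            /* driver-specific fields ... */
    };

    static int my_codec_patch(struct hda_codec *codec)
    {
            struct my_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL);

            if (!spec)
                    return -ENOMEM;
            codec->spec = spec;
            snd_hda_gen_init(&spec->gen);   /* prepare spec->gen.verbs */
            /* ... parse the BIOS config, queue verbs via snd_hda_gen_add_verbs() ... */
            return 0;
    }

    static void my_codec_free(struct hda_codec *codec)
    {
            struct my_spec *spec = codec->spec;

            snd_hda_gen_free(&spec->gen);   /* release spec->gen.verbs */
            kfree(spec);
    }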
index 41ca803a1fff9d1a4c60baa3889cc3171a990bf2..51cb2a2e4fce03fbfdce96fb4b2c1b23063d45ed 100644 (file)
@@ -1184,6 +1184,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
 {
        if (!codec)
                return;
+       snd_hda_jack_tbl_clear(codec);
        restore_init_pincfgs(codec);
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        cancel_delayed_work(&codec->power_work);
@@ -1192,6 +1193,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
        list_del(&codec->list);
        snd_array_free(&codec->mixers);
        snd_array_free(&codec->nids);
+       snd_array_free(&codec->cvt_setups);
        snd_array_free(&codec->conn_lists);
        snd_array_free(&codec->spdif_out);
        codec->bus->caddr_tbl[codec->addr] = NULL;
@@ -2333,6 +2335,8 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        /* free only driver_pins so that init_pins + user_pins are restored */
        snd_array_free(&codec->driver_pins);
        restore_pincfgs(codec);
+       snd_array_free(&codec->cvt_setups);
+       snd_array_free(&codec->spdif_out);
        codec->num_pcms = 0;
        codec->pcm_info = NULL;
        codec->preset = NULL;
@@ -4393,20 +4397,19 @@ void snd_hda_update_power_acct(struct hda_codec *codec)
        codec->power_jiffies += delta;
 }
 
-/**
- * snd_hda_power_up - Power-up the codec
- * @codec: HD-audio codec
- *
- * Increment the power-up counter and power up the hardware really when
- * not turned on yet.
- */
-void snd_hda_power_up(struct hda_codec *codec)
+/* Transition to powered up, if wait_power_down then wait for a pending
+ * transition to D3 to complete. A pending D3 transition is indicated
+ * with power_transition == -1. */
+static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
 {
        struct hda_bus *bus = codec->bus;
 
        spin_lock(&codec->power_lock);
        codec->power_count++;
-       if (codec->power_on || codec->power_transition > 0) {
+       /* Return if power_on or transitioning to power_on, unless currently
+        * powering down. */
+       if ((codec->power_on || codec->power_transition > 0) &&
+           !(wait_power_down && codec->power_transition < 0)) {
                spin_unlock(&codec->power_lock);
                return;
        }
@@ -4430,8 +4433,37 @@ void snd_hda_power_up(struct hda_codec *codec)
        codec->power_transition = 0;
        spin_unlock(&codec->power_lock);
 }
+
+/**
+ * snd_hda_power_up - Power-up the codec
+ * @codec: HD-audio codec
+ *
+ * Increment the power-up counter and power up the hardware really when
+ * not turned on yet.
+ */
+void snd_hda_power_up(struct hda_codec *codec)
+{
+       __snd_hda_power_up(codec, false);
+}
 EXPORT_SYMBOL_HDA(snd_hda_power_up);
 
+/**
+ * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending
+ *   D3 transition to complete.  This differs from snd_hda_power_up() when
+ *   power_transition == -1.  snd_hda_power_up sees this case as a nop,
+ *   snd_hda_power_up_d3wait waits for the D3 transition to complete then powers
+ *   back up.
+ * @codec: HD-audio codec
+ *
+ * Cancel any power down operation happening on the work queue, then power up.
+ */
+void snd_hda_power_up_d3wait(struct hda_codec *codec)
+{
+       /* This will cancel and wait for pending power_work to complete. */
+       __snd_hda_power_up(codec, true);
+}
+EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait);
+
 #define power_save(codec)      \
        ((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
 
index 4fc3960c85917837508ef32b1d954737061fc926..2fdaadbb4326fad36863fa1c5f1373464614ab88 100644 (file)
@@ -1056,10 +1056,12 @@ const char *snd_hda_get_jack_location(u32 cfg);
  */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 void snd_hda_power_up(struct hda_codec *codec);
+void snd_hda_power_up_d3wait(struct hda_codec *codec);
 void snd_hda_power_down(struct hda_codec *codec);
 void snd_hda_update_power_acct(struct hda_codec *codec);
 #else
 static inline void snd_hda_power_up(struct hda_codec *codec) {}
+static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {}
 static inline void snd_hda_power_down(struct hda_codec *codec) {}
 #endif
 
index 2b6392be451c688830cf9d42e346d0eee61ac1dc..7757536b9d5faccfdd33d5ff2ece304d4282a2d0 100644 (file)
@@ -1766,7 +1766,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
                                   buff_step);
        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                   buff_step);
-       snd_hda_power_up(apcm->codec);
+       snd_hda_power_up_d3wait(apcm->codec);
        err = hinfo->ops.open(hinfo, apcm->codec, substream);
        if (err < 0) {
                azx_release_device(azx_dev);
@@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip)
 static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
 static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
 
+#ifdef SUPPORT_VGA_SWITCHEROO
 static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
 
-#ifdef SUPPORT_VGA_SWITCHEROO
 static void azx_vs_set_state(struct pci_dev *pci,
                             enum vga_switcheroo_state state)
 {
@@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip)
 #else
 #define init_vga_switcheroo(chip)              /* NOP */
 #define register_vga_switcheroo(chip)          0
+#define check_hdmi_disabled(pci)       false
 #endif /* SUPPORT_VGA_SWITCHER */
 
 /*
@@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device)
        return azx_free(device->device_data);
 }
 
+#ifdef SUPPORT_VGA_SWITCHEROO
 /*
  * Check of disabled HDMI controller by vga-switcheroo
  */
@@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
        struct pci_dev *p = get_bound_vga(pci);
 
        if (p) {
-               if (vga_default_device() && p != vga_default_device())
+               if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
                        vga_inactive = true;
                pci_dev_put(p);
        }
        return vga_inactive;
 }
+#endif /* SUPPORT_VGA_SWITCHEROO */
 
 /*
  * white/black-listing for position_fix
@@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(0x6549, 0x1200),
          .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
        /* Creative X-Fi (CA0110-IBG) */
+       /* CTHDA chips */
+       { PCI_DEVICE(0x1102, 0x0010),
+         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
+       { PCI_DEVICE(0x1102, 0x0012),
+         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
        /* the following entry conflicts with snd-ctxfi driver,
         * as ctxfi driver mutates from HD-audio to native mode with
@@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
          AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
 #endif
-       /* CTHDA chips */
-       { PCI_DEVICE(0x1102, 0x0010),
-         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
-       { PCI_DEVICE(0x1102, 0x0012),
-         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
        /* Vortex86MX */
        { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
        /* VMware HDAudio */
index 3acb5824ad39e95e354491c2336a445cd796745e..2bf99fc1cbf243a78d06b711df3e9852d5882998 100644 (file)
@@ -445,8 +445,10 @@ static int conexant_init(struct hda_codec *codec)
 
 static void conexant_free(struct hda_codec *codec)
 {
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_free(&spec->gen);
        snd_hda_detach_beep_device(codec);
-       kfree(codec->spec);
+       kfree(spec);
 }
 
 static const struct snd_kcontrol_new cxt_capture_mixers[] = {
@@ -4061,7 +4063,7 @@ static void cx_auto_init_digital(struct hda_codec *codec)
 static int cx_auto_init(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
-       /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/
+       snd_hda_gen_apply_verbs(codec);
        cx_auto_init_output(codec);
        cx_auto_init_input(codec);
        cx_auto_init_digital(codec);
@@ -4466,6 +4468,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
        {}
 };
 
@@ -4497,6 +4500,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
        if (!spec)
                return -ENOMEM;
        codec->spec = spec;
+       snd_hda_gen_init(&spec->gen);
 
        switch (codec->vendor_id) {
        case 0x14f15045:
index 224410e8e9e7461431063a8d1f23b8650362faca..aa4c25e0f3277fce38520c72c9759bfc90b9022d 100644 (file)
@@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec)
        alc_fix_pll(codec);
        alc_auto_init_amp(codec, spec->init_amp);
 
+       snd_hda_gen_apply_verbs(codec);
        alc_init_special_input_src(codec);
        alc_auto_init_std(codec);
 
@@ -2288,6 +2289,7 @@ static void alc_free(struct hda_codec *codec)
        alc_shutup(codec);
        alc_free_kctls(codec);
        alc_free_bind_ctls(codec);
+       snd_hda_gen_free(&spec->gen);
        kfree(spec);
        snd_hda_detach_beep_device(codec);
 }
@@ -4252,6 +4254,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
                return -ENOMEM;
        codec->spec = spec;
        spec->mixer_nid = mixer_nid;
+       snd_hda_gen_init(&spec->gen);
 
        err = alc_codec_rename_from_preset(codec);
        if (err < 0) {
@@ -6439,6 +6442,7 @@ enum {
        ALC662_FIXUP_ASUS_MODE7,
        ALC662_FIXUP_ASUS_MODE8,
        ALC662_FIXUP_NO_JACK_DETECT,
+       ALC662_FIXUP_ZOTAC_Z68,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -6588,6 +6592,13 @@ static const struct alc_fixup alc662_fixups[] = {
                .type = ALC_FIXUP_FUNC,
                .v.func = alc_fixup_no_jack_detect,
        },
+       [ALC662_FIXUP_ZOTAC_Z68] = {
+               .type = ALC_FIXUP_PINS,
+               .v.pins = (const struct alc_pincfg[]) {
+                       { 0x1b, 0x02214020 }, /* Front HP */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6601,6 +6612,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+       SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
        SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
 
 #if 0
@@ -6676,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
@@ -6695,12 +6732,8 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
-       if ((alc_get_coef0(codec) & (1 << 14)) &&
-           codec->bus->pci->subsystem_vendor == 0x1025 &&
-           spec->cdefine.platform_type == 1) {
-               if (alc_codec_rename(codec, "ALC272X") < 0)
-                       goto error;
-       }
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
 
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
@@ -6708,6 +6741,13 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_auto_parse_customize_define(codec);
 
+       if ((alc_get_coef0(codec) & (1 << 14)) &&
+           codec->bus->pci->subsystem_vendor == 0x1025 &&
+           spec->cdefine.platform_type == 1) {
+               if (alc_codec_rename(codec, "ALC272X") < 0)
+                       goto error;
+       }
+
        /* automatic parse from the BIOS config */
        err = alc662_parse_auto_config(codec);
        if (err < 0)
@@ -6790,6 +6830,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
        { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
        { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+       { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
index 7db8228f1b882c013f4105e535af8ceaae3c1fbd..07675282015a5a87a7572b4bb632abae1684e0e9 100644 (file)
@@ -4367,7 +4367,7 @@ static int stac92xx_init(struct hda_codec *codec)
                                         AC_PINCTL_IN_EN);
        for (i = 0; i < spec->num_pwrs; i++)  {
                hda_nid_t nid = spec->pwr_nids[i];
-               int pinctl, def_conf;
+               unsigned int pinctl, def_conf;
 
                def_conf = snd_hda_codec_get_pincfg(codec, nid);
                def_conf = get_defcfg_connect(def_conf);
@@ -4376,6 +4376,11 @@ static int stac92xx_init(struct hda_codec *codec)
                        stac_toggle_power_map(codec, nid, 0);
                        continue;
                }
+               if (def_conf == AC_JACK_PORT_FIXED) {
+                       /* no need for jack detection for fixed pins */
+                       stac_toggle_power_map(codec, nid, 1);
+                       continue;
+               }
                /* power on when no jack detection is available */
                /* or when the VREF is used for controlling LED */
                if (!spec->hp_detect ||
index 0a5027b94714afb9600b94922f6d89140271492d..b8ac8710f47fb200d602bc2917598a153cad6884 100644 (file)
@@ -1988,6 +1988,13 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
        period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
        rate = hdspm_calc_dds_value(hdspm, period);
 
+       if (rate > 207000) {
+               /* Unreasonably high sample rate as seen on PCI MADI cards.
+                * Use the cached value instead.
+                */
+               rate = hdspm->system_sample_rate;
+       }
+
        return rate;
 }
 
index 64d2a4fa34b27d81ad84d0e64ed625e04e293b7a..e9b62b5ea637580ea7e8904ec2c68662ed1471fc 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
index 6f097fb60683e7b5605b38e373c7f1c75f4158c9..08c7f6685ff0937a367824c7a731fd4fb8e48d72 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
index a75c3766aedeec9192c0d887ebe9b8ff366e1e5b..0418fa11e6bd1a828a40249a4fb65f63c47255a3 100644 (file)
@@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000)
 }
 
 static int wm2000_poll_bit(struct i2c_client *i2c,
-                          unsigned int reg, u8 mask, int timeout)
+                          unsigned int reg, u8 mask)
 {
+       int timeout = 4000;
        int val;
 
        val = wm2000_read(i2c, reg);
@@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c,
 static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int ret, timeout;
+       int ret;
 
        BUG_ON(wm2000->anc_mode != ANC_OFF);
 
@@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 
        /* Wait for ANC engine to become ready */
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "ANC engine failed to reset\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_BOOT_COMPLETE, 1)) {
+                            WM2000_STATUS_BOOT_COMPLETE)) {
                dev_err(&i2c->dev, "ANC engine failed to initialise\n");
                return -ETIMEDOUT;
        }
@@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
        dev_dbg(&i2c->dev, "Download complete\n");
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_MOUSE_ENABLE |
                             WM2000_MODE_THERMAL_ENABLE);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_MOUSE_ENABLE |
                             WM2000_MODE_THERMAL_ENABLE);
@@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-               dev_err(&i2c->dev, "Timed out waiting for device after %dms\n",
-                       timeout * 10);
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
+               dev_err(&i2c->dev, "Timed out waiting for device\n");
                return -ETIMEDOUT;
        }
 
@@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 static int wm2000_power_down(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_POWER_DOWN);
        } else {
-               timeout = 10;
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_POWER_DOWN);
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) {
+                            WM2000_STATUS_POWER_DOWN_COMPLETE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC power down\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
                return -ETIMEDOUT;
        }
@@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue)
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_ANC_DISABLED, 10)) {
+                            WM2000_STATUS_ANC_DISABLED)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC disable\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
                return -ETIMEDOUT;
        }
@@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, 10)) {
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
                dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
                return -ETIMEDOUT;
        }
@@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
 static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        BUG_ON(wm2000->anc_mode != ANC_ACTIVE);
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_STANDBY_ENTRY);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_STANDBY_ENTRY);
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_ANC_DISABLED, timeout)) {
+                            WM2000_STATUS_ANC_DISABLED)) {
                dev_err(&i2c->dev,
                        "Timed out waiting for ANC disable after 1ms\n");
                return -ETIMEDOUT;
        }
 
-       if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE,
-                            1)) {
+       if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev,
-                       "Timed out waiting for standby after %dms\n",
-                       timeout * 10);
+                       "Timed out waiting for standby\n");
                return -ETIMEDOUT;
        }
 
@@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
 static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        BUG_ON(wm2000->anc_mode != ANC_STANDBY);
 
        wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0);
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_MOUSE_ENABLE);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_MOUSE_ENABLE);
@@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-               dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n",
-                       timeout * 10);
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
+               dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
                return -ETIMEDOUT;
        }
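With the timeout argument gone, wm2000_poll_bit() always allows the same generous number of iterations (4000, per the hunk near the top of this section) instead of a per-call budget, which is why every caller and its timeout bookkeeping shrink below. The loop body is not shown in this excerpt, so the following is only a sketch of how such a helper typically looks, assuming roughly 1 ms between reads and a zero return on timeout (matching the if (!wm2000_poll_bit(...)) checks used by the callers):

    /* Sketch only (the real loop body is outside this excerpt): poll until
     * (reg & mask) becomes set, giving up after `timeout` iterations. */
    static int wm2000_poll_bit(struct i2c_client *i2c,
                               unsigned int reg, u8 mask)
    {
            int timeout = 4000;
            int val;

            val = wm2000_read(i2c, reg);

            while (!(val & mask) && --timeout) {
                    msleep(1);
                    val = wm2000_read(i2c, reg);
            }

            return timeout;   /* 0 means the bit never appeared */
    }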
 
index acbdc5fde9236be0e43a5767bd0135eb92450e68..32682c1b7cdece413693db6f6487ae4df640da09 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index 65d525d74c549ee9d83bbb1f1ca0a982a103a567..812acd83fb4889e3413c76c0269fd2e36e33286b 100644 (file)
@@ -1863,6 +1863,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                                return ret;
                        }
 
+                       regcache_cache_only(wm8904->regmap, false);
                        regcache_sync(wm8904->regmap);
 
                        /* Enable bias */
@@ -1899,14 +1900,8 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
                                    WM8904_BIAS_ENA, 0);
 
-#ifdef CONFIG_REGULATOR
-               /* Post 2.6.34 we will be able to get a callback when
-                * the regulators are disabled which we can use but
-                * for now just assume that the power will be cut if
-                * the regulator API is in use.
-                */
-               codec->cache_sync = 1;
-#endif
+               regcache_cache_only(wm8904->regmap, true);
+               regcache_mark_dirty(wm8904->regmap);
 
                regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
                                       wm8904->supplies);
@@ -2084,10 +2079,8 @@ static int wm8904_probe(struct snd_soc_codec *codec)
 {
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
        struct wm8904_pdata *pdata = wm8904->pdata;
-       u16 *reg_cache = codec->reg_cache;
        int ret, i;
 
-       codec->cache_sync = 1;
        codec->control_data = wm8904->regmap;
 
        switch (wm8904->devtype) {
@@ -2150,6 +2143,7 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                goto err_enable;
        }
 
+       regcache_cache_only(wm8904->regmap, true);
        /* Change some default settings - latch VU and enable ZC */
        snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
                            WM8904_ADC_VU, WM8904_ADC_VU);
@@ -2180,14 +2174,18 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                        if (!pdata->gpio_cfg[i])
                                continue;
 
-                       reg_cache[WM8904_GPIO_CONTROL_1 + i]
-                               = pdata->gpio_cfg[i] & 0xffff;
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_GPIO_CONTROL_1 + i,
+                                          0xffff,
+                                          pdata->gpio_cfg[i]);
                }
 
                /* Zero is the default value for these anyway */
                for (i = 0; i < WM8904_MIC_REGS; i++)
-                       reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i]
-                               = pdata->mic_cfg[i];
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_MIC_BIAS_CONTROL_0 + i,
+                                          0xffff,
+                                          pdata->mic_cfg[i]);
        }
 
        /* Set Class W by default - this will be managed by the Class
index 993639d694ce308aed067cea3003fcbf6ad2ee2b..1436b6ce74d1319b347bc8294dee332a616a02c3 100644 (file)
 #define WM8994_NUM_DRC 3
 #define WM8994_NUM_EQ  3
 
+static struct {
+       unsigned int reg;
+       unsigned int mask;
+} wm8994_vu_bits[] = {
+       { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+       { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+       { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+       { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+       { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU },
+       { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU },
+       { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+       { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+       { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+       { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+
+       { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
+       { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
+       { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
+       { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
+       { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
+       { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
+       { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
+       { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
+       { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU },
+       { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU },
+       { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
+};
+
 static int wm8994_drc_base[] = {
        WM8994_AIF1_DRC1_1,
        WM8994_AIF1_DRC2_1,
@@ -691,9 +724,6 @@ static void wm1811_jackdet_set_mode(struct snd_soc_codec *codec, u16 mode)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
-       if (!wm8994->jackdet || !wm8994->jack_cb)
-               return;
-
        if (!wm8994->jackdet || !wm8994->jack_cb)
                return;
 
@@ -989,6 +1019,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
        struct snd_soc_codec *codec = w->codec;
        struct wm8994 *control = codec->control_data;
        int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
+       int i;
        int dac;
        int adc;
        int val;
@@ -1047,6 +1078,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
                                    WM8994_AIF1DAC2L_ENA);
                break;
 
+       case SND_SOC_DAPM_POST_PMU:
+               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+                       snd_soc_write(codec, wm8994_vu_bits[i].reg,
+                                     snd_soc_read(codec,
+                                                  wm8994_vu_bits[i].reg));
+               break;
+
        case SND_SOC_DAPM_PRE_PMD:
        case SND_SOC_DAPM_POST_PMD:
                snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1072,6 +1110,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                      struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = w->codec;
+       int i;
        int dac;
        int adc;
        int val;
@@ -1122,6 +1161,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                                    WM8994_AIF2DACR_ENA);
                break;
 
+       case SND_SOC_DAPM_POST_PMU:
+               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+                       snd_soc_write(codec, wm8994_vu_bits[i].reg,
+                                     snd_soc_read(codec,
+                                                  wm8994_vu_bits[i].reg));
+               break;
+
        case SND_SOC_DAPM_PRE_PMD:
        case SND_SOC_DAPM_POST_PMD:
                snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1190,17 +1236,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
                if (wm8994->aif1clk_enable) {
-                       aif1clk_ev(w, kcontrol, event);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
                        snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
                                            WM8994_AIF1CLK_ENA_MASK,
                                            WM8994_AIF1CLK_ENA);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
                        wm8994->aif1clk_enable = 0;
                }
                if (wm8994->aif2clk_enable) {
-                       aif2clk_ev(w, kcontrol, event);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
                        snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
                                            WM8994_AIF2CLK_ENA_MASK,
                                            WM8994_AIF2CLK_ENA);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
                        wm8994->aif2clk_enable = 0;
                }
                break;
@@ -1221,15 +1269,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w,
        switch (event) {
        case SND_SOC_DAPM_POST_PMD:
                if (wm8994->aif1clk_disable) {
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
                        snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
                                            WM8994_AIF1CLK_ENA_MASK, 0);
-                       aif1clk_ev(w, kcontrol, event);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
                        wm8994->aif1clk_disable = 0;
                }
                if (wm8994->aif2clk_disable) {
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
                        snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
                                            WM8994_AIF2CLK_ENA_MASK, 0);
-                       aif2clk_ev(w, kcontrol, event);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
                        wm8994->aif2clk_disable = 0;
                }
                break;
@@ -1527,9 +1577,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
 
 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
 SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
-                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+                   SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
-                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+                   SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
                   left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -3879,39 +3931,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 
        pm_runtime_put(codec->dev);
 
-       /* Latch volume updates (right only; we always do left then right). */
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
-                           WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
-                           WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
-                           WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
-                           WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
-                           WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
-                           WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
-                           WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
-                           WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
-                           WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
-                           WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
-                           WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
-                           WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
-                           WM8994_DAC1_VU, WM8994_DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
-                           WM8994_DAC1_VU, WM8994_DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
-                           WM8994_DAC2_VU, WM8994_DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
-                           WM8994_DAC2_VU, WM8994_DAC2_VU);
+       /* Latch volume update bits */
+       for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+               snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
+                                   wm8994_vu_bits[i].mask,
+                                   wm8994_vu_bits[i].mask);
 
        /* Set the low bit of the 3D stereo depth so TLV matches */
        snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
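Both halves of this file's change hang off the wm8994_vu_bits[] table introduced at the top: the probe path sets every listed VU (volume update) bit once, replacing the long hand-written sequence removed above, and the new SND_SOC_DAPM_POST_PMU cases in aif1clk_ev()/aif2clk_ev() write each register back with its current value so volumes programmed while the AIF clock was off are latched once it comes up. A compact sketch of the two loops, wrapped in illustrative helper names that are not part of the patch:

    /* Sketch: set the VU latch bit for every register in wm8994_vu_bits[]. */
    static void wm8994_set_vu_bits(struct snd_soc_codec *codec)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
                    snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
                                        wm8994_vu_bits[i].mask,
                                        wm8994_vu_bits[i].mask);
    }

    /* Sketch: after an AIF clock powers up, rewrite each register with its own
     * value so any volume written while the clock was stopped takes effect. */
    static void wm8994_rewrite_vu_regs(struct snd_soc_codec *codec)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
                    snd_soc_write(codec, wm8994_vu_bits[i].reg,
                                  snd_soc_read(codec, wm8994_vu_bits[i].reg));
    }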
index 8af422e38fd060118968eb6f3bf62d707b32584e..dc9b42b7fc4d79d749f07be41f1fe3484eb987e5 100644 (file)
@@ -2837,8 +2837,6 @@ static int wm8996_probe(struct snd_soc_codec *codec)
                }
        }
 
-       regcache_cache_only(codec->control_data, true);
-
        /* Apply platform data settings */
        snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
                            WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
@@ -3051,7 +3049,6 @@ static int wm8996_remove(struct snd_soc_codec *codec)
        for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
                regulator_unregister_notifier(wm8996->supplies[i].consumer,
                                              &wm8996->disable_nb[i]);
-       regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
 
        return 0;
 }
@@ -3206,14 +3203,15 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
        dev_info(&i2c->dev, "revision %c\n",
                 (reg & WM8996_CHIP_REV_MASK) + 'A');
 
-       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
-
        ret = wm8996_reset(wm8996);
        if (ret < 0) {
                dev_err(&i2c->dev, "Failed to issue reset\n");
                goto err_regmap;
        }
 
+       regcache_cache_only(wm8996->regmap, true);
+       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
+
        wm8996_init_gpio(wm8996);
 
        ret = snd_soc_register_codec(&i2c->dev,
index f23700359c672372f84a58df8701176b57e6d475..080327414c6b61da37d62a845261e93f413864fa 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "imx-audmux.h"
 
@@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
 static int __devinit imx_audmux_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       struct pinctrl *pinctrl;
        const struct of_device_id *of_id =
                        of_match_device(imx_audmux_dt_ids, &pdev->dev);
 
@@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev)
        if (!audmux_base)
                return -EADDRNOTAVAIL;
 
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               dev_err(&pdev->dev, "setup pinctrl failed!");
+               return PTR_ERR(pinctrl);
+       }
+
        audmux_clk = clk_get(&pdev->dev, "audmux");
        if (IS_ERR(audmux_clk)) {
                dev_dbg(&pdev->dev, "cannot get clock: %ld\n",
index cf3ed0362c9ccf76cbbdc975c8788b33725b70f7..28dd76c7cb1c08ac308ca7730d9abfc8b02b0903 100644 (file)
@@ -543,7 +543,7 @@ static int imx_ssi_probe(struct platform_device *pdev)
                        ret);
                goto failed_clk;
        }
-       clk_enable(ssi->clk);
+       clk_prepare_enable(ssi->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
@@ -641,7 +641,7 @@ failed_ac97:
 failed_ioremap:
        release_mem_region(res->start, resource_size(res));
 failed_get_resource:
-       clk_disable(ssi->clk);
+       clk_disable_unprepare(ssi->clk);
        clk_put(ssi->clk);
 failed_clk:
        kfree(ssi);
@@ -664,7 +664,7 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
 
        iounmap(ssi->base);
        release_mem_region(res->start, resource_size(res));
-       clk_disable(ssi->clk);
+       clk_disable_unprepare(ssi->clk);
        clk_put(ssi->clk);
        kfree(ssi);
 
index 1c2aa7fab3fd1f2beed5232187e959e69f38e7bc..4da5fc55c7ee81d81369fae163344e8058c96b86 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <mach/hardware.h>
 #include <mach/dma.h>
-#include <mach/audio.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
@@ -194,7 +193,7 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
 {
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) {
+       if (ssp->type == PXA25x_SSP) {
                sscr0 &= ~0x0000ff00;
                sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
        } else {
@@ -212,7 +211,7 @@ static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
        u32 div;
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP)
+       if (ssp->type == PXA25x_SSP)
                div = ((sscr0 >> 8) & 0xff) * 2 + 2;
        else
                div = ((sscr0 >> 8) & 0xfff) + 1;
@@ -242,7 +241,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
                break;
        case PXA_SSP_CLK_PLL:
                /* Internal PLL is fixed */
-               if (cpu_is_pxa25x())
+               if (ssp->type == PXA25x_SSP)
                        priv->sysclk = 1843200;
                else
                        priv->sysclk = 13000000;
@@ -266,11 +265,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
 
        /* The SSP clock must be disabled when changing SSP clock mode
         * on PXA2xx.  On PXA3xx it must be enabled when doing so. */
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_disable(ssp->clk);
        val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
        pxa_ssp_write_reg(ssp, SSCR0, val);
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_enable(ssp->clk);
 
        return 0;
@@ -294,24 +293,20 @@ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
        case PXA_SSP_AUDIO_DIV_SCDB:
                val = pxa_ssp_read_reg(ssp, SSACD);
                val &= ~SSACD_SCDB;
-#if defined(CONFIG_PXA3xx)
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        val &= ~SSACD_SCDX8;
-#endif
                switch (div) {
                case PXA_SSP_CLK_SCDB_1:
                        val |= SSACD_SCDB;
                        break;
                case PXA_SSP_CLK_SCDB_4:
                        break;
-#if defined(CONFIG_PXA3xx)
                case PXA_SSP_CLK_SCDB_8:
-                       if (cpu_is_pxa3xx())
+                       if (ssp->type == PXA3xx_SSP)
                                val |= SSACD_SCDX8;
                        else
                                return -EINVAL;
                        break;
-#endif
                default:
                        return -EINVAL;
                }
@@ -337,10 +332,8 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
        struct ssp_device *ssp = priv->ssp;
        u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
 
-#if defined(CONFIG_PXA3xx)
-       if (cpu_is_pxa3xx())
+       if (ssp->type == PXA3xx_SSP)
                pxa_ssp_write_reg(ssp, SSACDD, 0);
-#endif
 
        switch (freq_out) {
        case 5622000:
@@ -365,11 +358,10 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                break;
 
        default:
-#ifdef CONFIG_PXA3xx
                /* PXA3xx has a clock ditherer which can be used to generate
                 * a wider range of frequencies - calculate a value for it.
                 */
-               if (cpu_is_pxa3xx()) {
+               if (ssp->type == PXA3xx_SSP) {
                        u32 val;
                        u64 tmp = 19968;
                        tmp *= 1000000;
@@ -386,7 +378,6 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                                val, freq_out);
                        break;
                }
-#endif
 
                return -EINVAL;
        }
@@ -590,10 +581,8 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
        /* bit size */
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-#ifdef CONFIG_PXA3xx
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        sscr0 |= SSCR0_FPCKE;
-#endif
                sscr0 |= SSCR0_DataSize(16);
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
@@ -618,9 +607,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        * trying and failing a lot; some of the registers
                        * needed for that mode are only available on PXA3xx.
                        */
-
-#ifdef CONFIG_PXA3xx
-                       if (!cpu_is_pxa3xx())
+                       if (ssp->type != PXA3xx_SSP)
                                return -EINVAL;
 
                        sspsp |= SSPSP_SFRMWDTH(width * 2);
@@ -628,9 +615,6 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        sspsp |= SSPSP_EDMYSTOP(3);
                        sspsp |= SSPSP_DMYSTOP(3);
                        sspsp |= SSPSP_DMYSTRT(1);
-#else
-                       return -EINVAL;
-#endif
                } else {
                        /* The frame width is the width the LRCLK is
                         * asserted for; the delay is expressed in
index 7cee22515d9de55c505eae1dd460107b1eacee3a..2ef98536f1da9ecff07ebd3d6903a8bf78345029 100644 (file)
@@ -1052,6 +1052,13 @@ static int fsi_dma_quit(struct fsi_priv *fsi, struct fsi_stream *io)
        return 0;
 }
 
+static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
+{
+       struct snd_pcm_runtime *runtime = io->substream->runtime;
+
+       return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
+}
+
 static void fsi_dma_complete(void *data)
 {
        struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1061,7 +1068,7 @@ static void fsi_dma_complete(void *data)
        enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
                DMA_TO_DEVICE : DMA_FROM_DEVICE;
 
-       dma_sync_single_for_cpu(dai->dev, io->dma,
+       dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
                        samples_to_bytes(runtime, io->period_samples), dir);
 
        io->buff_sample_pos += io->period_samples;
@@ -1078,13 +1085,6 @@ static void fsi_dma_complete(void *data)
        snd_pcm_period_elapsed(io->substream);
 }
 
-static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
-{
-       struct snd_pcm_runtime *runtime = io->substream->runtime;
-
-       return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
-}
-
 static void fsi_dma_do_tasklet(unsigned long data)
 {
        struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1110,7 +1110,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
        len     = samples_to_bytes(runtime, io->period_samples);
        buf     = fsi_dma_get_area(io);
 
-       dma_sync_single_for_device(dai->dev, io->dma, len, dir);
+       dma_sync_single_for_device(dai->dev, buf, len, dir);
 
        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
@@ -1172,9 +1172,16 @@ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
 static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
                                 int start)
 {
+       struct fsi_master *master = fsi_get_master(fsi);
+       u32 clk  = fsi_is_port_a(fsi) ? CRA  : CRB;
        u32 enable = start ? DMA_ON : 0;
 
        fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable);
+
+       dmaengine_terminate_all(io->chan);
+
+       if (fsi_is_clk_master(fsi))
+               fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
 }
 
 static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
index 90ee77d2409da8402ea58026b788a624f850a25a..89eae93445cf517e5e356297374fcbcf183529ae 100644 (file)
@@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
                        /* do we need to add this widget to the list ? */
                        if (list) {
                                int err;
-                               err = dapm_list_add_widget(list, path->sink);
+                               err = dapm_list_add_widget(list, path->source);
                                if (err < 0) {
                                        dev_err(widget->dapm->dev, "could not add widget %s\n",
                                                widget->name);
@@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
        if (stream == SNDRV_PCM_STREAM_PLAYBACK)
                paths = is_connected_output_ep(dai->playback_widget, list);
        else
-               paths = is_connected_input_ep(dai->playback_widget, list);
+               paths = is_connected_input_ep(dai->capture_widget, list);
 
        trace_snd_soc_dapm_connected(paths, stream);
        dapm_clear_walk(&card->dapm);
index bedd1717a37381376e6084a5e631d739dbb06cb3..48fd15b312c1e46d06945640dcbc2df0cd1eabe2 100644 (file)
@@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
                for (i = 0; i < card->num_links; i++) {
                        be = &card->rtd[i];
 
+                       if (!be->dai_link->no_pcm)
+                               continue;
+
                        if (be->cpu_dai->playback_widget == widget ||
                                be->codec_dai->playback_widget == widget)
                                return be;
@@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
                for (i = 0; i < card->num_links; i++) {
                        be = &card->rtd[i];
 
+                       if (!be->dai_link->no_pcm)
+                               continue;
+
                        if (be->cpu_dai->capture_widget == widget ||
                                be->codec_dai->capture_widget == widget)
                                return be;
index 57cd419f743ec5ee7a84f24ed0ad9532de01fd6a..f43edb364a185de5cb2f3c43aa16850a32c2236e 100644 (file)
@@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
 MODULE_DESCRIPTION("Tegra30 AHUB driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match);
index 0b0df49d9d33b071118576c340b69366f212663f..3b6da91188a9949faaacaa73539f3fa04dbbcd4b 100644 (file)
@@ -346,6 +346,17 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
        return 0;
 }
 
+static int tegra_wm8903_remove(struct snd_soc_card *card)
+{
+       struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]);
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       struct snd_soc_codec *codec = codec_dai->codec;
+
+       wm8903_mic_detect(codec, NULL, 0, 0);
+
+       return 0;
+}
+
 static struct snd_soc_dai_link tegra_wm8903_dai = {
        .name = "WM8903",
        .stream_name = "WM8903 PCM",
@@ -363,6 +374,8 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
        .dai_link = &tegra_wm8903_dai,
        .num_links = 1,
 
+       .remove = tegra_wm8903_remove,
+
        .controls = tegra_wm8903_controls,
        .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
        .dapm_widgets = tegra_wm8903_dapm_widgets,
index 6f9715ab32fe77cc3fd39de42410efe1c2aa1006..56ad923bf6b5cb74db2f5a56cdc2a16807c66a03 100644 (file)
@@ -209,7 +209,7 @@ static int usb6fire_fw_ezusb_upload(
        int ret;
        u8 data;
        struct usb_device *device = interface_to_usbdev(intf);
-       const struct firmware *fw = 0;
+       const struct firmware *fw = NULL;
        struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
                        GFP_KERNEL);
 
index 0d37238b84572fed54cef1c89ee23e1d44d8ae00..2b9fffff23b62a5449d39f2a4b03cbaadb1529a6 100644 (file)
@@ -119,6 +119,7 @@ struct snd_usb_substream {
        unsigned long unlink_mask;      /* bitmask of unlinked urbs */
 
        /* data and sync endpoints for this stream */
+       unsigned int ep_num;            /* the endpoint number */
        struct snd_usb_endpoint *data_endpoint;
        struct snd_usb_endpoint *sync_endpoint;
        unsigned long flags;
index e6906901debbc2aaa71c40621fbd4241d8d15663..0f647d22cb4ac7bccffe25311011255cf0b33104 100644 (file)
@@ -414,7 +414,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
 {
        struct list_head *p;
        struct snd_usb_endpoint *ep;
-       int ret, is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+       int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock(&chip->mutex);
 
@@ -434,16 +434,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
                    ep_num);
 
-       /* select the alt setting once so the endpoints become valid */
-       ret = usb_set_interface(chip->dev, alts->desc.bInterfaceNumber,
-                               alts->desc.bAlternateSetting);
-       if (ret < 0) {
-               snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                       __func__, ret);
-               ep = NULL;
-               goto __exit_unlock;
-       }
-
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                goto __exit_unlock;
@@ -831,9 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
        if (++ep->use_count != 1)
                return 0;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return -EINVAL;
-
        /* just to be sure */
        deactivate_urbs(ep, 0, 1);
        wait_clear_urbs(ep);
@@ -911,9 +898,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        if (snd_BUG_ON(ep->use_count == 0))
                return;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return;
-
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, force, can_sleep);
                ep->data_subs = NULL;
@@ -926,42 +910,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        }
 }
 
-/**
- * snd_usb_endpoint_activate: activate an snd_usb_endpoint
- *
- * @ep: the endpoint to activate
- *
- * If the endpoint is not currently in use, this functions will select the
- * correct alternate interface setting for the interface of this endpoint.
- *
- * In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
- */
-int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep)
-{
-       if (ep->use_count != 0)
-               return 0;
-
-       if (!ep->chip->shutdown &&
-           !test_and_set_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, ep->alt_idx);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s() usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
-                       return ret;
-               }
-
-               return 0;
-       }
-
-       return -EBUSY;
-}
-
 /**
  * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
  *
@@ -980,24 +928,15 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        if (!ep)
                return -EINVAL;
 
+       deactivate_urbs(ep, 1, 1);
+       wait_clear_urbs(ep);
+
        if (ep->use_count != 0)
                return 0;
 
-       if (!ep->chip->shutdown &&
-           test_and_clear_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, 0);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       return ret;
-               }
+       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
 
-               return 0;
-       }
-
-       return -EBUSY;
+       return 0;
 }
 
 /**
index 41daaa24c25f465e6ff209118d4a24f6f68ec220..e71fe55cebefa8285963643e7a592a0b71fbfc73 100644 (file)
@@ -341,6 +341,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = audigy2nx_map,
                .selector_map = audigy2nx_selectors,
        },
+       {       /* Logitech, Inc. QuickCam Pro for Notebooks */
+               .id = USB_ID(0x046d, 0x0991),
+               .ignore_ctl_error = 1,
+       },
+       {       /* Logitech, Inc. QuickCam E 3500 */
+               .id = USB_ID(0x046d, 0x09a4),
+               .ignore_ctl_error = 1,
+       },
        {
                /* Hercules DJ Console (Windows Edition) */
                .id = USB_ID(0x06f8, 0xb000),
index 24839d932648c81849ecfe63214a8124d914dd0e..a1298f379428280045bf661f89e5fa59bb0acb48 100644 (file)
@@ -261,19 +261,6 @@ static void stop_endpoints(struct snd_usb_substream *subs,
                                      force, can_sleep, wait);
 }
 
-static int activate_endpoints(struct snd_usb_substream *subs)
-{
-       if (subs->sync_endpoint) {
-               int ret;
-
-               ret = snd_usb_endpoint_activate(subs->sync_endpoint);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return snd_usb_endpoint_activate(subs->data_endpoint);
-}
-
 static int deactivate_endpoints(struct snd_usb_substream *subs)
 {
        int reta, retb;
@@ -314,6 +301,33 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (fmt == subs->cur_audiofmt)
                return 0;
 
+       /* close the old interface */
+       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+               err = usb_set_interface(subs->dev, subs->interface, 0);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n",
+                               dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               subs->interface = -1;
+               subs->altset_idx = 0;
+       }
+
+       /* set interface */
+       if (subs->interface != fmt->iface ||
+           subs->altset_idx != fmt->altset_idx) {
+               err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
+                               fmt->iface, fmt->altsetting);
+               subs->interface = fmt->iface;
+               subs->altset_idx = fmt->altset_idx;
+       }
+
        subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
                                                   alts, fmt->endpoint, subs->direction,
                                                   SND_USB_ENDPOINT_TYPE_DATA);
@@ -354,17 +368,21 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                     get_endpoint(alts, 1)->bSynchAddress != 0 &&
                     !implicit_fb)) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  get_endpoint(alts, 1)->bmAttributes,
+                                  get_endpoint(alts, 1)->bLength,
+                                  get_endpoint(alts, 1)->bSynchAddress);
                        return -EINVAL;
                }
                ep = get_endpoint(alts, 1)->bEndpointAddress;
-               if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+               if (!implicit_fb &&
+                   get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                    (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
-                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) ||
-                    ( is_playback && !implicit_fb))) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
                        return -EINVAL;
                }
 
@@ -383,7 +401,7 @@ add_sync_ep:
                subs->data_endpoint->sync_master = subs->sync_endpoint;
        }
 
-       if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+       if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
                return err;
 
        subs->cur_audiofmt = fmt;
@@ -446,7 +464,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                struct usb_interface *iface;
                iface = usb_ifnum_to_if(subs->dev, fmt->iface);
                alts = &iface->altsetting[fmt->altset_idx];
-               ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+               ret = snd_usb_init_sample_rate(subs->stream->chip, fmt->iface, alts, fmt, rate);
                if (ret < 0)
                        return ret;
                subs->cur_rate = rate;
@@ -456,12 +474,6 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                stop_endpoints(subs, 0, 0, 0);
-               deactivate_endpoints(subs);
-
-               ret = activate_endpoints(subs);
-               if (ret < 0)
-                       goto unlock;
-
                ret = snd_usb_endpoint_set_params(subs->data_endpoint, hw_params, fmt,
                                                  subs->sync_endpoint);
                if (ret < 0)
@@ -496,6 +508,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->period_bytes = 0;
        mutex_lock(&subs->stream->chip->shutdown_mutex);
        stop_endpoints(subs, 0, 1, 1);
+       deactivate_endpoints(subs);
        mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
@@ -788,6 +801,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
        int count = 0, needs_knot = 0;
        int err;
 
+       kfree(subs->rate_list.list);
+       subs->rate_list.list = NULL;
+
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
                        return 0;
@@ -931,16 +947,20 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
 {
-       int ret;
        struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
        struct snd_usb_substream *subs = &as->substream[direction];
 
        stop_endpoints(subs, 0, 0, 0);
-       ret = deactivate_endpoints(subs);
+
+       if (!as->chip->shutdown && subs->interface >= 0) {
+               usb_set_interface(subs->dev, subs->interface, 0);
+               subs->interface = -1;
+       }
+
        subs->pcm_substream = NULL;
        snd_usb_autosuspend(subs->stream->chip);
 
-       return ret;
+       return 0;
 }
 
 /* Since a URB can handle only a single linear buffer, we must use double
@@ -1144,7 +1164,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
        return -EINVAL;
 }
 
-int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream,
+                                            int cmd)
 {
        int err;
        struct snd_usb_substream *subs = substream->runtime->private_data;
index d89ab4c7d44b28bc0c52f9e4ec817030059b0692..79780fa57a431345e28896730d12a9fbedf11426 100644 (file)
@@ -1831,6 +1831,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       USB_DEVICE(0x0582, 0x014d),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               /* .vendor_name = "BOSS", */
+               /* .product_name = "GT-100", */
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = & (const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 /* Guillemot devices */
 {
index 6b7d7a2b7baa3a83c68e35917d95515a3caa482a..083ed81160e58b094ee8f4fca637371aa5b5fff4 100644 (file)
@@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
        subs->formats |= fp->formats;
        subs->num_formats++;
        subs->fmt_type = fp->fmt_type;
+       subs->ep_num = fp->endpoint;
 }
 
 /*
@@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                if (as->fmt_type != fp->fmt_type)
                        continue;
                subs = &as->substream[stream];
-               if (!subs->data_endpoint)
-                       continue;
-               if (subs->data_endpoint->ep_num == fp->endpoint) {
+               if (subs->ep_num == fp->endpoint) {
                        list_add_tail(&fp->list, &subs->fmt_list);
                        subs->num_formats++;
                        subs->formats |= fp->formats;
@@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                if (as->fmt_type != fp->fmt_type)
                        continue;
                subs = &as->substream[stream];
-               if (subs->data_endpoint)
+               if (subs->ep_num)
                        continue;
                err = snd_pcm_new_stream(as->pcm, stream, 1);
                if (err < 0)
index 146fd6147e84be5cde2a66009f331f1b6ee2b805..d9834b36294373f88d29731350ccc9d384b41788 100644 (file)
@@ -701,14 +701,18 @@ int main(void)
        pfd.fd = fd;
 
        while (1) {
+               struct sockaddr *addr_p = (struct sockaddr *) &addr;
+               socklen_t addr_l = sizeof(addr);
                pfd.events = POLLIN;
                pfd.revents = 0;
                poll(&pfd, 1, -1);
 
-               len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
+               len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+                               addr_p, &addr_l);
 
-               if (len < 0) {
-                       syslog(LOG_ERR, "recv failed; error:%d", len);
+               if (len < 0 || addr.nl_pid) {
+                       syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+                                       addr.nl_pid, errno, strerror(errno));
                        close(fd);
                        return -1;
                }
index 998534992197591bde7328e44cd2725a7af84797..554828219c33cceaf0c5b9d3f57bd69d4a7a109f 100644 (file)
@@ -1434,8 +1434,11 @@ static int event_read_fields(struct event_format *event, struct format_field **f
 fail:
        free_token(token);
 fail_expect:
-       if (field)
+       if (field) {
+               free(field->type);
+               free(field->name);
                free(field);
+       }
        return -1;
 }
 
@@ -1712,6 +1715,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
 
                if (set_op_prio(arg) == -1) {
                        event->flags |= EVENT_FL_FAILED;
+                       /* arg->op.op (= token) will be freed at out_free */
+                       arg->op.op = NULL;
                        goto out_free;
                }
 
@@ -2124,6 +2129,13 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
 
                free_token(token);
                type = process_arg(event, arg, &token);
+
+               if (type == EVENT_OP)
+                       type = process_op(event, arg, &token);
+
+               if (type == EVENT_ERROR)
+                       goto out_free;
+
                if (test_type_token(type, token, EVENT_DELIM, ","))
                        goto out_free;
 
@@ -2288,17 +2300,18 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
        arg = alloc_arg();
        type = process_arg(event, arg, &token);
        if (type == EVENT_ERROR)
-               goto out_free;
+               goto out_free_arg;
 
        if (!test_type_token(type, token, EVENT_OP, "]"))
-               goto out_free;
+               goto out_free_arg;
 
        free_token(token);
        type = read_token_item(tok);
        return type;
 
+ out_free_arg:
+       free_arg(arg);
  out_free:
-       free(arg);
        free_token(token);
        *tok = NULL;
        return EVENT_ERROR;
@@ -3362,6 +3375,7 @@ process_defined_func(struct trace_seq *s, void *data, int size,
                        break;
                }
                farg = farg->next;
+               param = param->next;
        }
 
        ret = (*func_handle->func)(s, args);
index 2d40c5ed81d66a00fde6cc09917ac32dba196525..dfcfe2c131de6e3d8ea6c26cbf452c2834fd90d7 100644 (file)
@@ -325,9 +325,8 @@ static void free_events(struct event_list *events)
 }
 
 static struct filter_arg *
-create_arg_item(struct event_format *event,
-               const char *token, enum filter_arg_type type,
-               char **error_str)
+create_arg_item(struct event_format *event, const char *token,
+               enum event_type type, char **error_str)
 {
        struct format_field *field;
        struct filter_arg *arg;
@@ -1585,7 +1584,7 @@ get_value(struct event_format *event,
                const char *name;
 
                name = get_comm(event, record);
-               return (unsigned long long)name;
+               return (unsigned long)name;
        }
 
        pevent_read_number_field(field, record->data, &val);
index 42c6fd2ae85d19ac31fa5f0c191dfcf776ca60e9..767ea2436e1cd841a762ee8cdb039ba80a7120b3 100644 (file)
 
        # Default, disable using /dev/null
        dir = /root/.debug
+
+[annotate]
+
+       # Defaults
+       hide_src_code = false
+       use_offset = true
+       jump_arrows = true
+       show_nr_jumps = false
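For reference, these keys can be overridden per user from ~/.perfconfig in the same format; a minimal sketch (the section and key names come from the fragment above, the chosen values are only illustrative):

	[annotate]

		# override two of the defaults listed above
		hide_src_code = true
		show_nr_jumps = true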
index 5476bc0a1eac8712587b0f1bb3617c8067c76f94..b4b572e8c100af50d9c836905368a8dc4fed8205 100644 (file)
@@ -1,4 +1,6 @@
 tools/perf
+tools/scripts
+tools/lib/traceevent
 include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
index 1d3d513beb9b1083c60531f349d5d0c06af64dfa..0eee64cfe9a0f48ced0ee2397068979763355cd4 100644 (file)
@@ -80,7 +80,7 @@ ifeq ("$(origin DEBUG)", "command line")
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6
+  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
 endif
 
 ifdef PARSER_DEBUG
@@ -89,7 +89,7 @@ ifdef PARSER_DEBUG
        PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
 endif
 
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 ALL_LDFLAGS = $(LDFLAGS)
index 806e0a286634a6bd7b7a766706c32426730c7727..67522cf874053e24ff87d4fa3fca67e6845981be 100644 (file)
@@ -215,7 +215,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
        }
 
        if (total_nr_samples == 0) {
-               ui__warning("The %s file has no samples!\n", session->filename);
+               ui__error("The %s file has no samples!\n", session->filename);
                goto out_delete;
        }
 out_delete:
index e52d77ec7084e02e4fa318e873ff2cf1679a222d..acd78dc283411692f9e7e48b78085d9b5da63471 100644 (file)
@@ -116,7 +116,7 @@ static const char * const evlist_usage[] = {
 int cmd_evlist(int argc, const char **argv, const char *prefix __used)
 {
        struct perf_attr_details details = { .verbose = false, };
-       const char *input_name;
+       const char *input_name = NULL;
        const struct option options[] = {
                OPT_STRING('i', "input", &input_name, "file",
                            "Input file name"),
index e5cb08427e13f56ed7f9223d520a954ef0fc1cf2..f95840d04e4c7a224821e395600df2bbdc7e4323 100644 (file)
@@ -264,7 +264,7 @@ try_again:
                        }
 
                        if (err == ENOENT) {
-                               ui__warning("The %s event is not supported.\n",
+                               ui__error("The %s event is not supported.\n",
                                            event_name(pos));
                                exit(EXIT_FAILURE);
                        }
@@ -858,8 +858,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                usage_with_options(record_usage, record_options);
 
        if (rec->force && rec->append_file) {
-               fprintf(stderr, "Can't overwrite and append at the same time."
-                               " You need to choose between -f and -A");
+               ui__error("Can't overwrite and append at the same time."
+                         " You need to choose between -f and -A");
                usage_with_options(record_usage, record_options);
        } else if (rec->append_file) {
                rec->write_mode = WRITE_APPEND;
@@ -868,8 +868,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        }
 
        if (nr_cgroups && !rec->opts.target.system_wide) {
-               fprintf(stderr, "cgroup monitoring only available in"
-                       " system-wide mode\n");
+               ui__error("cgroup monitoring only available in"
+                         " system-wide mode\n");
                usage_with_options(record_usage, record_options);
        }
 
@@ -905,7 +905,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                int saved_errno = errno;
 
                perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
-               ui__warning("%s", errbuf);
+               ui__error("%s", errbuf);
 
                err = -saved_errno;
                goto out_free_fd;
@@ -933,7 +933,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        else if (rec->opts.freq) {
                rec->opts.default_interval = rec->opts.freq;
        } else {
-               fprintf(stderr, "frequency and count are zero, aborting\n");
+               ui__error("frequency and count are zero, aborting\n");
                err = -EINVAL;
                goto out_free_fd;
        }
index d58e41445d0d6dcfb4ca26fbe36eb9b7e02c596b..25249f76329daf851f925240f8c295c0c3fc5bdc 100644 (file)
@@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 
        if (symbol_conf.use_callchain) {
                err = callchain_append(he->callchain,
-                                      &evsel->hists.callchain_cursor,
+                                      &callchain_cursor,
                                       sample->period);
                if (err)
                        return err;
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
         * so we don't allocated the extra space needed because the stdio
         * code will not use it.
         */
-       if (al->sym != NULL && use_browser > 0) {
+       if (he->ms.sym != NULL && use_browser > 0) {
                struct annotation *notes = symbol__annotation(he->ms.sym);
 
                assert(evsel != NULL);
@@ -251,13 +251,13 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
 
        if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
                if (sort__has_parent) {
-                       ui__warning("Selected --sort parent, but no "
+                       ui__error("Selected --sort parent, but no "
                                    "callchain data. Did you call "
                                    "'perf record' without -g?\n");
                        return -EINVAL;
                }
                if (symbol_conf.use_callchain) {
-                       ui__warning("Selected -g but no callchain data. Did "
+                       ui__error("Selected -g but no callchain data. Did "
                                    "you call 'perf record' without -g?\n");
                        return -1;
                }
@@ -266,17 +266,15 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
                   !symbol_conf.use_callchain) {
                        symbol_conf.use_callchain = true;
                        if (callchain_register_param(&callchain_param) < 0) {
-                               ui__warning("Can't register callchain "
-                                           "params.\n");
+                               ui__error("Can't register callchain params.\n");
                                return -EINVAL;
                        }
        }
 
        if (sort__branch_mode == 1) {
                if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
-                       fprintf(stderr, "selected -b but no branch data."
-                                       " Did you call perf record without"
-                                       " -b?\n");
+                       ui__error("Selected -b but no branch data. "
+                                 "Did you call perf record without -b?\n");
                        return -1;
                }
        }
@@ -420,7 +418,7 @@ static int __cmd_report(struct perf_report *rep)
        }
 
        if (nr_samples == 0) {
-               ui__warning("The %s file has no samples!\n", session->filename);
+               ui__error("The %s file has no samples!\n", session->filename);
                goto out_delete;
        }
 
index 62ae30d34fa6c14734a5f171b52e7b3bb0979257..07b5c7703dd10677e12a5b6e36c8603f18fd0f7e 100644 (file)
@@ -1129,7 +1129,7 @@ static int add_default_attributes(void)
                return 0;
 
        if (!evsel_list->nr_entries) {
-               if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+               if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
                        return -1;
        }
 
@@ -1139,21 +1139,21 @@ static int add_default_attributes(void)
                return 0;
 
        /* Append detailed run extra attributes: */
-       if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+       if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 2)
                return 0;
 
        /* Append very detailed run extra attributes: */
-       if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+       if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 3)
                return 0;
 
        /* Append very, very detailed run extra attributes: */
-       return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
+       return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
 }
 
 int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1179,6 +1179,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                fprintf(stderr, "cannot use both --output and --log-fd\n");
                usage_with_options(stat_usage, options);
        }
+
+       if (output_fd < 0) {
+               fprintf(stderr, "argument to --log-fd must be > 0\n");
+               usage_with_options(stat_usage, options);
+       }
+
        if (!output) {
                struct timespec tm;
                mode = append_file ? "a" : "w";
@@ -1190,7 +1196,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                }
                clock_gettime(CLOCK_REALTIME, &tm);
                fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
-       } else if (output_fd != 2) {
+       } else if (output_fd > 0) {
                mode = append_file ? "a" : "w";
                output = fdopen(output_fd, mode);
                if (!output) {
index 6031dce0429f8f93e267d530e9a91fec3ddaf74b..6bb0277b7dfecdbf63e727592995480a409f16f5 100644 (file)
@@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                }
 
                if (symbol_conf.use_callchain) {
-                       err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
+                       err = callchain_append(he->callchain, &callchain_cursor,
                                               sample->period);
                        if (err)
                                return;
@@ -953,22 +953,22 @@ try_again:
                                attr->config = PERF_COUNT_SW_CPU_CLOCK;
                                if (counter->name) {
                                        free(counter->name);
-                                       counter->name = strdup(event_name(counter));
+                                       counter->name = NULL;
                                }
                                goto try_again;
                        }
 
                        if (err == ENOENT) {
-                               ui__warning("The %s event is not supported.\n",
+                               ui__error("The %s event is not supported.\n",
                                            event_name(counter));
                                goto out_err;
                        } else if (err == EMFILE) {
-                               ui__warning("Too many events are opened.\n"
+                               ui__error("Too many events are opened.\n"
                                            "Try again after reducing the number of events\n");
                                goto out_err;
                        }
 
-                       ui__warning("The sys_perf_event_open() syscall "
+                       ui__error("The sys_perf_event_open() syscall "
                                    "returned with %d (%s).  /bin/dmesg "
                                    "may provide additional information.\n"
                                    "No CONFIG_PERF_EVENTS=y kernel support "
@@ -978,7 +978,7 @@ try_again:
        }
 
        if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
-               ui__warning("Failed to mmap with %d (%s)\n",
+               ui__error("Failed to mmap with %d (%s)\n",
                            errno, strerror(errno));
                goto out_err;
        }
@@ -994,12 +994,12 @@ static int perf_top__setup_sample_type(struct perf_top *top)
 {
        if (!top->sort_has_symbols) {
                if (symbol_conf.use_callchain) {
-                       ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+                       ui__error("Selected -g but \"sym\" not present in --sort/-s.");
                        return -EINVAL;
                }
        } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
                if (callchain_register_param(&callchain_param) < 0) {
-                       ui__warning("Can't register callchain params.\n");
+                       ui__error("Can't register callchain params.\n");
                        return -EINVAL;
                }
        }
@@ -1041,7 +1041,7 @@ static int __cmd_top(struct perf_top *top)
 
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
                                                            display_thread), top)) {
-               printf("Could not create display thread.\n");
+               ui__error("Could not create display thread.\n");
                exit(-1);
        }
 
@@ -1050,7 +1050,7 @@ static int __cmd_top(struct perf_top *top)
 
                param.sched_priority = top->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
-                       printf("Could not set realtime priority.\n");
+                       ui__error("Could not set realtime priority.\n");
                        exit(-1);
                }
        }
@@ -1274,7 +1274,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
                int saved_errno = errno;
 
                perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
-               ui__warning("%s", errbuf);
+               ui__error("%s", errbuf);
 
                status = -saved_errno;
                goto out_delete_evlist;
@@ -1288,7 +1288,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 
        if (!top.evlist->nr_entries &&
            perf_evlist__add_default(top.evlist) < 0) {
-               pr_err("Not enough memory for event selector list\n");
+               ui__error("Not enough memory for event selector list\n");
                return -ENOMEM;
        }
 
@@ -1305,7 +1305,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
        else if (top.freq) {
                top.default_interval = top.freq;
        } else {
-               fprintf(stderr, "frequency and count are zero, aborting\n");
+               ui__error("frequency and count are zero, aborting\n");
                exit(EXIT_FAILURE);
        }
 
index bd0bb1b1279b8ec258cc40a231fc4a9d289abd96..67e5d0cace85aad7c70c7ba3ac8bbcf95c4d6e10 100644 (file)
@@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via
 prctl.  When a counter is disabled, it doesn't count or generate
 events but does continue to exist and maintain its count value.
 
-An individual counter or counter group can be enabled with
+An individual counter can be enabled with
 
-       ioctl(fd, PERF_EVENT_IOC_ENABLE);
+       ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 
 or disabled with
 
-       ioctl(fd, PERF_EVENT_IOC_DISABLE);
+       ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 
+For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument.
 Enabling or disabling the leader of a group enables or disables the
 whole group; that is, while the group leader is disabled, none of the
 counters in the group will count.  Enabling or disabling a member of a
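To make the ioctl usage above concrete, here is a minimal user-space sketch (illustrative only, not part of this patch; it assumes the perf_event_open(2) syscall and simply counts instructions executed by the calling process):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;	/* created disabled; enabled via ioctl below */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* third argument 0: this counter only */
		/* ... code being measured ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		/*
		 * For a counter group, issuing the same ioctls on the group
		 * leader with PERF_IOC_FLAG_GROUP as the third argument acts
		 * on every member, as described in the text above.
		 */

		if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
			printf("instructions: %lld\n", count);
		close(fd);
		return 0;
	}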
index 14f1034f14f93efbef0b7e5a7ea3f642674fc856..f960ccb2edc6f38f8a7b7a3f0f1b740cbd351c2b 100644 (file)
@@ -227,7 +227,7 @@ struct perf_record_opts {
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int user_freq;
-       int          branch_stack;
+       u64          branch_stack;
        u64          default_interval;
        u64          user_interval;
 };
index cde4d0f0ddb99c8de13e581d1595ce948a7afe85..1818a531f1d3ea83346e48a131844cb7dc8212a5 100644 (file)
@@ -35,16 +35,16 @@ int ui_browser__set_color(struct ui_browser *browser, int color)
        return ret;
 }
 
-void ui_browser__set_percent_color(struct ui_browser *self,
+void ui_browser__set_percent_color(struct ui_browser *browser,
                                   double percent, bool current)
 {
-        int color = ui_browser__percent_color(self, percent, current);
-        ui_browser__set_color(self, color);
+        int color = ui_browser__percent_color(browser, percent, current);
+        ui_browser__set_color(browser, color);
 }
 
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
 {
-       SLsmg_gotorc(self->y + y, self->x + x);
+       SLsmg_gotorc(browser->y + y, browser->x + x);
 }
 
 static struct list_head *
@@ -73,23 +73,23 @@ ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
        return NULL;
 }
 
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-       struct list_head *head = self->entries;
+       struct list_head *head = browser->entries;
        struct list_head *pos;
 
-       if (self->nr_entries == 0)
+       if (browser->nr_entries == 0)
                return;
 
        switch (whence) {
        case SEEK_SET:
-               pos = ui_browser__list_head_filter_entries(self, head->next);
+               pos = ui_browser__list_head_filter_entries(browser, head->next);
                break;
        case SEEK_CUR:
-               pos = self->top;
+               pos = browser->top;
                break;
        case SEEK_END:
-               pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+               pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
                break;
        default:
                return;
@@ -99,18 +99,18 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
 
        if (offset > 0) {
                while (offset-- != 0)
-                       pos = ui_browser__list_head_filter_entries(self, pos->next);
+                       pos = ui_browser__list_head_filter_entries(browser, pos->next);
        } else {
                while (offset++ != 0)
-                       pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+                       pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
        }
 
-       self->top = pos;
+       browser->top = pos;
 }
 
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-       struct rb_root *root = self->entries;
+       struct rb_root *root = browser->entries;
        struct rb_node *nd;
 
        switch (whence) {
@@ -118,7 +118,7 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
                nd = rb_first(root);
                break;
        case SEEK_CUR:
-               nd = self->top;
+               nd = browser->top;
                break;
        case SEEK_END:
                nd = rb_last(root);
@@ -135,23 +135,23 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
                        nd = rb_prev(nd);
        }
 
-       self->top = nd;
+       browser->top = nd;
 }
 
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
 {
        struct rb_node *nd;
        int row = 0;
 
-       if (self->top == NULL)
-                self->top = rb_first(self->entries);
+       if (browser->top == NULL)
+                browser->top = rb_first(browser->entries);
 
-       nd = self->top;
+       nd = browser->top;
 
        while (nd != NULL) {
-               ui_browser__gotorc(self, row, 0);
-               self->write(self, nd, row);
-               if (++row == self->height)
+               ui_browser__gotorc(browser, row, 0);
+               browser->write(browser, nd, row);
+               if (++row == browser->height)
                        break;
                nd = rb_next(nd);
        }
@@ -159,17 +159,17 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
        return row;
 }
 
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
 {
-       return self->top_idx + row == self->index;
+       return browser->top_idx + row == browser->index;
 }
 
-void ui_browser__refresh_dimensions(struct ui_browser *self)
+void ui_browser__refresh_dimensions(struct ui_browser *browser)
 {
-       self->width = SLtt_Screen_Cols - 1;
-       self->height = SLtt_Screen_Rows - 2;
-       self->y = 1;
-       self->x = 0;
+       browser->width = SLtt_Screen_Cols - 1;
+       browser->height = SLtt_Screen_Rows - 2;
+       browser->y = 1;
+       browser->x = 0;
 }
 
 void ui_browser__handle_resize(struct ui_browser *browser)
@@ -225,10 +225,10 @@ bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
        return key == K_ENTER || toupper(key) == 'Y';
 }
 
-void ui_browser__reset_index(struct ui_browser *self)
+void ui_browser__reset_index(struct ui_browser *browser)
 {
-       self->index = self->top_idx = 0;
-       self->seek(self, 0, SEEK_SET);
+       browser->index = browser->top_idx = 0;
+       browser->seek(browser, 0, SEEK_SET);
 }
 
 void __ui_browser__show_title(struct ui_browser *browser, const char *title)
@@ -245,26 +245,26 @@ void ui_browser__show_title(struct ui_browser *browser, const char *title)
        pthread_mutex_unlock(&ui__lock);
 }
 
-int ui_browser__show(struct ui_browser *self, const char *title,
+int ui_browser__show(struct ui_browser *browser, const char *title,
                     const char *helpline, ...)
 {
        int err;
        va_list ap;
 
-       ui_browser__refresh_dimensions(self);
+       ui_browser__refresh_dimensions(browser);
 
        pthread_mutex_lock(&ui__lock);
-       __ui_browser__show_title(self, title);
+       __ui_browser__show_title(browser, title);
 
-       self->title = title;
-       free(self->helpline);
-       self->helpline = NULL;
+       browser->title = title;
+       free(browser->helpline);
+       browser->helpline = NULL;
 
        va_start(ap, helpline);
-       err = vasprintf(&self->helpline, helpline, ap);
+       err = vasprintf(&browser->helpline, helpline, ap);
        va_end(ap);
        if (err > 0)
-               ui_helpline__push(self->helpline);
+               ui_helpline__push(browser->helpline);
        pthread_mutex_unlock(&ui__lock);
        return err ? 0 : -1;
 }
@@ -350,7 +350,7 @@ void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
        browser->seek(browser, browser->top_idx, SEEK_SET);
 }
 
-int ui_browser__run(struct ui_browser *self, int delay_secs)
+int ui_browser__run(struct ui_browser *browser, int delay_secs)
 {
        int err, key;
 
@@ -358,7 +358,7 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
                off_t offset;
 
                pthread_mutex_lock(&ui__lock);
-               err = __ui_browser__refresh(self);
+               err = __ui_browser__refresh(browser);
                SLsmg_refresh();
                pthread_mutex_unlock(&ui__lock);
                if (err < 0)
@@ -368,18 +368,18 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
 
                if (key == K_RESIZE) {
                        ui__refresh_dimensions(false);
-                       ui_browser__refresh_dimensions(self);
-                       __ui_browser__show_title(self, self->title);
-                       ui_helpline__puts(self->helpline);
+                       ui_browser__refresh_dimensions(browser);
+                       __ui_browser__show_title(browser, browser->title);
+                       ui_helpline__puts(browser->helpline);
                        continue;
                }
 
-               if (self->use_navkeypressed && !self->navkeypressed) {
+               if (browser->use_navkeypressed && !browser->navkeypressed) {
                        if (key == K_DOWN || key == K_UP ||
                            key == K_PGDN || key == K_PGUP ||
                            key == K_HOME || key == K_END ||
                            key == ' ') {
-                               self->navkeypressed = true;
+                               browser->navkeypressed = true;
                                continue;
                        } else
                                return key;
@@ -387,59 +387,59 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
 
                switch (key) {
                case K_DOWN:
-                       if (self->index == self->nr_entries - 1)
+                       if (browser->index == browser->nr_entries - 1)
                                break;
-                       ++self->index;
-                       if (self->index == self->top_idx + self->height) {
-                               ++self->top_idx;
-                               self->seek(self, +1, SEEK_CUR);
+                       ++browser->index;
+                       if (browser->index == browser->top_idx + browser->height) {
+                               ++browser->top_idx;
+                               browser->seek(browser, +1, SEEK_CUR);
                        }
                        break;
                case K_UP:
-                       if (self->index == 0)
+                       if (browser->index == 0)
                                break;
-                       --self->index;
-                       if (self->index < self->top_idx) {
-                               --self->top_idx;
-                               self->seek(self, -1, SEEK_CUR);
+                       --browser->index;
+                       if (browser->index < browser->top_idx) {
+                               --browser->top_idx;
+                               browser->seek(browser, -1, SEEK_CUR);
                        }
                        break;
                case K_PGDN:
                case ' ':
-                       if (self->top_idx + self->height > self->nr_entries - 1)
+                       if (browser->top_idx + browser->height > browser->nr_entries - 1)
                                break;
 
-                       offset = self->height;
-                       if (self->index + offset > self->nr_entries - 1)
-                               offset = self->nr_entries - 1 - self->index;
-                       self->index += offset;
-                       self->top_idx += offset;
-                       self->seek(self, +offset, SEEK_CUR);
+                       offset = browser->height;
+                       if (browser->index + offset > browser->nr_entries - 1)
+                               offset = browser->nr_entries - 1 - browser->index;
+                       browser->index += offset;
+                       browser->top_idx += offset;
+                       browser->seek(browser, +offset, SEEK_CUR);
                        break;
                case K_PGUP:
-                       if (self->top_idx == 0)
+                       if (browser->top_idx == 0)
                                break;
 
-                       if (self->top_idx < self->height)
-                               offset = self->top_idx;
+                       if (browser->top_idx < browser->height)
+                               offset = browser->top_idx;
                        else
-                               offset = self->height;
+                               offset = browser->height;
 
-                       self->index -= offset;
-                       self->top_idx -= offset;
-                       self->seek(self, -offset, SEEK_CUR);
+                       browser->index -= offset;
+                       browser->top_idx -= offset;
+                       browser->seek(browser, -offset, SEEK_CUR);
                        break;
                case K_HOME:
-                       ui_browser__reset_index(self);
+                       ui_browser__reset_index(browser);
                        break;
                case K_END:
-                       offset = self->height - 1;
-                       if (offset >= self->nr_entries)
-                               offset = self->nr_entries - 1;
+                       offset = browser->height - 1;
+                       if (offset >= browser->nr_entries)
+                               offset = browser->nr_entries - 1;
 
-                       self->index = self->nr_entries - 1;
-                       self->top_idx = self->index - offset;
-                       self->seek(self, -offset, SEEK_END);
+                       browser->index = browser->nr_entries - 1;
+                       browser->top_idx = browser->index - offset;
+                       browser->seek(browser, -offset, SEEK_END);
                        break;
                default:
                        return key;
@@ -448,22 +448,22 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
        return -1;
 }
 
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
 {
        struct list_head *pos;
-       struct list_head *head = self->entries;
+       struct list_head *head = browser->entries;
        int row = 0;
 
-       if (self->top == NULL || self->top == self->entries)
-                self->top = ui_browser__list_head_filter_entries(self, head->next);
+       if (browser->top == NULL || browser->top == browser->entries)
+                browser->top = ui_browser__list_head_filter_entries(browser, head->next);
 
-       pos = self->top;
+       pos = browser->top;
 
        list_for_each_from(pos, head) {
-               if (!self->filter || !self->filter(self, pos)) {
-                       ui_browser__gotorc(self, row, 0);
-                       self->write(self, pos, row);
-                       if (++row == self->height)
+               if (!browser->filter || !browser->filter(browser, pos)) {
+                       ui_browser__gotorc(browser, row, 0);
+                       browser->write(browser, pos, row);
+                       if (++row == browser->height)
                                break;
                }
        }
@@ -708,4 +708,6 @@ void ui_browser__init(void)
                struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
                sltt_set_color(c->colorset, c->name, c->fg, c->bg);
        }
+
+       annotate_browser__init();
 }
index dd96d82299022c0bb67924752177d3e6c2eaa096..af70314605e54e2468fe774822cdeecd2945cc7c 100644 (file)
@@ -69,4 +69,5 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
 unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
 
 void ui_browser__init(void);
+void annotate_browser__init(void);
 #endif /* _PERF_UI_BROWSER_H_ */
index 6e0ef79be16907a781bcf31be96377671412f440..34b1c46eaf42c63d36ed5c36cbaceea38c2347a9 100644 (file)
@@ -19,6 +19,16 @@ struct browser_disasm_line {
        int             jump_sources;
 };
 
+static struct annotate_browser_opt {
+       bool hide_src_code,
+            use_offset,
+            jump_arrows,
+            show_nr_jumps;
+} annotate_browser__opts = {
+       .use_offset     = true,
+       .jump_arrows    = true,
+};
+
 struct annotate_browser {
        struct ui_browser b;
        struct rb_root    entries;
@@ -30,10 +40,6 @@ struct annotate_browser {
        int                 nr_entries;
        int                 max_jump_sources;
        int                 nr_jumps;
-       bool                hide_src_code;
-       bool                use_offset;
-       bool                jump_arrows;
-       bool                show_nr_jumps;
        bool                searching_backwards;
        u8                  addr_width;
        u8                  jumps_width;
@@ -48,11 +54,9 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin
        return (struct browser_disasm_line *)(dl + 1);
 }
 
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
 {
-       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
-
-       if (ab->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
                return dl->offset == -1;
        }
@@ -79,30 +83,30 @@ static int annotate_browser__set_jumps_percent_color(struct annotate_browser *br
         return ui_browser__set_color(&browser->b, color);
 }
 
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
 {
-       struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
        struct browser_disasm_line *bdl = disasm_line__browser(dl);
-       bool current_entry = ui_browser__is_current_entry(self, row);
-       bool change_color = (!ab->hide_src_code &&
-                            (!current_entry || (self->use_navkeypressed &&
-                                                !self->navkeypressed)));
-       int width = self->width, printed;
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+       bool change_color = (!annotate_browser__opts.hide_src_code &&
+                            (!current_entry || (browser->use_navkeypressed &&
+                                                !browser->navkeypressed)));
+       int width = browser->width, printed;
        char bf[256];
 
        if (dl->offset != -1 && bdl->percent != 0.0) {
-               ui_browser__set_percent_color(self, bdl->percent, current_entry);
+               ui_browser__set_percent_color(browser, bdl->percent, current_entry);
                slsmg_printf("%6.2f ", bdl->percent);
        } else {
-               ui_browser__set_percent_color(self, 0, current_entry);
+               ui_browser__set_percent_color(browser, 0, current_entry);
                slsmg_write_nstring(" ", 7);
        }
 
        SLsmg_write_char(' ');
 
        /* The scroll bar isn't being used */
-       if (!self->navkeypressed)
+       if (!browser->navkeypressed)
                width += 1;
 
        if (!*dl->line)
@@ -116,14 +120,14 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                u64 addr = dl->offset;
                int color = -1;
 
-               if (!ab->use_offset)
+               if (!annotate_browser__opts.use_offset)
                        addr += ab->start;
 
-               if (!ab->use_offset) {
+               if (!annotate_browser__opts.use_offset) {
                        printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
                } else {
                        if (bdl->jump_sources) {
-                               if (ab->show_nr_jumps) {
+                               if (annotate_browser__opts.show_nr_jumps) {
                                        int prev;
                                        printed = scnprintf(bf, sizeof(bf), "%*d ",
                                                            ab->jumps_width,
@@ -131,7 +135,7 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                                        prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
                                                                                         current_entry);
                                        slsmg_write_nstring(bf, printed);
-                                       ui_browser__set_color(self, prev);
+                                       ui_browser__set_color(browser, prev);
                                }
 
                                printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
@@ -143,19 +147,19 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                }
 
                if (change_color)
-                       color = ui_browser__set_color(self, HE_COLORSET_ADDR);
+                       color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
                slsmg_write_nstring(bf, printed);
                if (change_color)
-                       ui_browser__set_color(self, color);
+                       ui_browser__set_color(browser, color);
                if (dl->ins && dl->ins->ops->scnprintf) {
                        if (ins__is_jump(dl->ins)) {
                                bool fwd = dl->ops.target.offset > (u64)dl->offset;
 
-                               ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
+                               ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
                                                                    SLSMG_UARROW_CHAR);
                                SLsmg_write_char(' ');
                        } else if (ins__is_call(dl->ins)) {
-                               ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
+                               ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
                                SLsmg_write_char(' ');
                        } else {
                                slsmg_write_nstring(" ", 2);
@@ -164,12 +168,12 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                        if (strcmp(dl->name, "retq")) {
                                slsmg_write_nstring(" ", 2);
                        } else {
-                               ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
+                               ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
                                SLsmg_write_char(' ');
                        }
                }
 
-               disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
+               disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
                slsmg_write_nstring(bf, width - 10 - printed);
        }
 
@@ -184,7 +188,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct browser_disasm_line *btarget, *bcursor;
        unsigned int from, to;
 
-       if (!cursor->ins || !ins__is_jump(cursor->ins) ||
+       if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
            !disasm_line__has_offset(cursor))
                return;
 
@@ -195,7 +199,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        bcursor = disasm_line__browser(cursor);
        btarget = disasm_line__browser(target);
 
-       if (ab->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                from = bcursor->idx_asm;
                to = btarget->idx_asm;
        } else {
@@ -209,10 +213,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
 
 static unsigned int annotate_browser__refresh(struct ui_browser *browser)
 {
-       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        int ret = ui_browser__list_head_refresh(browser);
 
-       if (ab->jump_arrows)
+       if (annotate_browser__opts.jump_arrows)
                annotate_browser__draw_current_jump(browser);
 
        ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -272,27 +275,27 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_l
        rb_insert_color(&bdl->rb_node, root);
 }
 
-static void annotate_browser__set_top(struct annotate_browser *self,
+static void annotate_browser__set_top(struct annotate_browser *browser,
                                      struct disasm_line *pos, u32 idx)
 {
        unsigned back;
 
-       ui_browser__refresh_dimensions(&self->b);
-       back = self->b.height / 2;
-       self->b.top_idx = self->b.index = idx;
+       ui_browser__refresh_dimensions(&browser->b);
+       back = browser->b.height / 2;
+       browser->b.top_idx = browser->b.index = idx;
 
-       while (self->b.top_idx != 0 && back != 0) {
+       while (browser->b.top_idx != 0 && back != 0) {
                pos = list_entry(pos->node.prev, struct disasm_line, node);
 
-               if (disasm_line__filter(&self->b, &pos->node))
+               if (disasm_line__filter(&browser->b, &pos->node))
                        continue;
 
-               --self->b.top_idx;
+               --browser->b.top_idx;
                --back;
        }
 
-       self->b.top = pos;
-       self->b.navkeypressed = true;
+       browser->b.top = pos;
+       browser->b.navkeypressed = true;
 }
 
 static void annotate_browser__set_rb_top(struct annotate_browser *browser,
@@ -300,10 +303,14 @@ static void annotate_browser__set_rb_top(struct annotate_browser *browser,
 {
        struct browser_disasm_line *bpos;
        struct disasm_line *pos;
+       u32 idx;
 
        bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
        pos = ((struct disasm_line *)bpos) - 1;
-       annotate_browser__set_top(browser, pos, bpos->idx);
+       idx = bpos->idx;
+       if (annotate_browser__opts.hide_src_code)
+               idx = bpos->idx_asm;
+       annotate_browser__set_top(browser, pos, idx);
        browser->curr_hot = nd;
 }
 
@@ -343,12 +350,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
        dl = list_entry(browser->b.top, struct disasm_line, node);
        bdl = disasm_line__browser(dl);
 
-       if (browser->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                if (bdl->idx_asm < offset)
                        offset = bdl->idx;
 
                browser->b.nr_entries = browser->nr_entries;
-               browser->hide_src_code = false;
+               annotate_browser__opts.hide_src_code = false;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = bdl->idx - offset;
                browser->b.index = bdl->idx;
@@ -363,7 +370,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
                        offset = bdl->idx_asm;
 
                browser->b.nr_entries = browser->nr_asm_entries;
-               browser->hide_src_code = true;
+               annotate_browser__opts.hide_src_code = true;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = bdl->idx_asm - offset;
                browser->b.index = bdl->idx_asm;
@@ -372,6 +379,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
        return true;
 }
 
+static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
+{
+       ui_browser__reset_index(&browser->b);
+       browser->b.nr_entries = browser->nr_asm_entries;
+}
+
 static bool annotate_browser__callq(struct annotate_browser *browser,
                                    int evidx, void (*timer)(void *arg),
                                    void *arg, int delay_secs)
@@ -574,33 +587,46 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
        return __annotate_browser__search_reverse(browser);
 }
 
-static int annotate_browser__run(struct annotate_browser *self, int evidx,
+static void annotate_browser__update_addr_width(struct annotate_browser *browser)
+{
+       if (annotate_browser__opts.use_offset)
+               browser->target_width = browser->min_addr_width;
+       else
+               browser->target_width = browser->max_addr_width;
+
+       browser->addr_width = browser->target_width;
+
+       if (annotate_browser__opts.show_nr_jumps)
+               browser->addr_width += browser->jumps_width + 1;
+}
+
+static int annotate_browser__run(struct annotate_browser *browser, int evidx,
                                 void(*timer)(void *arg),
                                 void *arg, int delay_secs)
 {
        struct rb_node *nd = NULL;
-       struct map_symbol *ms = self->b.priv;
+       struct map_symbol *ms = browser->b.priv;
        struct symbol *sym = ms->sym;
        const char *help = "Press 'h' for help on key bindings";
        int key;
 
-       if (ui_browser__show(&self->b, sym->name, help) < 0)
+       if (ui_browser__show(&browser->b, sym->name, help) < 0)
                return -1;
 
-       annotate_browser__calc_percent(self, evidx);
+       annotate_browser__calc_percent(browser, evidx);
 
-       if (self->curr_hot) {
-               annotate_browser__set_rb_top(self, self->curr_hot);
-               self->b.navkeypressed = false;
+       if (browser->curr_hot) {
+               annotate_browser__set_rb_top(browser, browser->curr_hot);
+               browser->b.navkeypressed = false;
        }
 
-       nd = self->curr_hot;
+       nd = browser->curr_hot;
 
        while (1) {
-               key = ui_browser__run(&self->b, delay_secs);
+               key = ui_browser__run(&browser->b, delay_secs);
 
                if (delay_secs != 0) {
-                       annotate_browser__calc_percent(self, evidx);
+                       annotate_browser__calc_percent(browser, evidx);
                        /*
                         * Current line focus got out of the list of most active
                         * lines, NULL it so that if TAB|UNTAB is pressed, we
@@ -622,27 +648,27 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
                        if (nd != NULL) {
                                nd = rb_prev(nd);
                                if (nd == NULL)
-                                       nd = rb_last(&self->entries);
+                                       nd = rb_last(&browser->entries);
                        } else
-                               nd = self->curr_hot;
+                               nd = browser->curr_hot;
                        break;
                case K_UNTAB:
                        if (nd != NULL) {
                                nd = rb_next(nd);
                                if (nd == NULL)
-                                       nd = rb_first(&self->entries);
+                                       nd = rb_first(&browser->entries);
                        } else
-                               nd = self->curr_hot;
+                               nd = browser->curr_hot;
                        break;
                case K_F1:
                case 'h':
-                       ui_browser__help_window(&self->b,
+                       ui_browser__help_window(&browser->b,
                "UP/DOWN/PGUP\n"
                "PGDN/SPACE    Navigate\n"
                "q/ESC/CTRL+C  Exit\n\n"
                "->            Go to target\n"
                "<-            Exit\n"
-               "h             Cycle thru hottest instructions\n"
+               "H             Cycle thru hottest instructions\n"
                "j             Toggle showing jump to target arrows\n"
                "J             Toggle showing number of jump sources on targets\n"
                "n             Search next string\n"
@@ -652,57 +678,62 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
                "?             Search previous string\n");
                        continue;
                case 'H':
-                       nd = self->curr_hot;
+                       nd = browser->curr_hot;
                        break;
                case 's':
-                       if (annotate_browser__toggle_source(self))
+                       if (annotate_browser__toggle_source(browser))
                                ui_helpline__puts(help);
                        continue;
                case 'o':
-                       self->use_offset = !self->use_offset;
-                       if (self->use_offset)
-                               self->target_width = self->min_addr_width;
-                       else
-                               self->target_width = self->max_addr_width;
-update_addr_width:
-                       self->addr_width = self->target_width;
-                       if (self->show_nr_jumps)
-                               self->addr_width += self->jumps_width + 1;
+                       annotate_browser__opts.use_offset = !annotate_browser__opts.use_offset;
+                       annotate_browser__update_addr_width(browser);
                        continue;
                case 'j':
-                       self->jump_arrows = !self->jump_arrows;
+                       annotate_browser__opts.jump_arrows = !annotate_browser__opts.jump_arrows;
                        continue;
                case 'J':
-                       self->show_nr_jumps = !self->show_nr_jumps;
-                       goto update_addr_width;
+                       annotate_browser__opts.show_nr_jumps = !annotate_browser__opts.show_nr_jumps;
+                       annotate_browser__update_addr_width(browser);
+                       continue;
                case '/':
-                       if (annotate_browser__search(self, delay_secs)) {
+                       if (annotate_browser__search(browser, delay_secs)) {
 show_help:
                                ui_helpline__puts(help);
                        }
                        continue;
                case 'n':
-                       if (self->searching_backwards ?
-                           annotate_browser__continue_search_reverse(self, delay_secs) :
-                           annotate_browser__continue_search(self, delay_secs))
+                       if (browser->searching_backwards ?
+                           annotate_browser__continue_search_reverse(browser, delay_secs) :
+                           annotate_browser__continue_search(browser, delay_secs))
                                goto show_help;
                        continue;
                case '?':
-                       if (annotate_browser__search_reverse(self, delay_secs))
+                       if (annotate_browser__search_reverse(browser, delay_secs))
                                goto show_help;
                        continue;
+               case 'D': {
+                       static int seq;
+                       ui_helpline__pop();
+                       ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
+                                          seq++, browser->b.nr_entries,
+                                          browser->b.height,
+                                          browser->b.index,
+                                          browser->b.top_idx,
+                                          browser->nr_asm_entries);
+               }
+                       continue;
                case K_ENTER:
                case K_RIGHT:
-                       if (self->selection == NULL)
+                       if (browser->selection == NULL)
                                ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
-                       else if (self->selection->offset == -1)
+                       else if (browser->selection->offset == -1)
                                ui_helpline__puts("Actions are only available for assembly lines.");
-                       else if (!self->selection->ins) {
-                               if (strcmp(self->selection->name, "retq"))
+                       else if (!browser->selection->ins) {
+                               if (strcmp(browser->selection->name, "retq"))
                                        goto show_sup_ins;
                                goto out;
-                       } else if (!(annotate_browser__jump(self) ||
-                                    annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
+                       } else if (!(annotate_browser__jump(browser) ||
+                                    annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) {
 show_sup_ins:
                                ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
                        }
@@ -717,10 +748,10 @@ show_sup_ins:
                }
 
                if (nd != NULL)
-                       annotate_browser__set_rb_top(self, nd);
+                       annotate_browser__set_rb_top(browser, nd);
        }
 out:
-       ui_browser__hide(&self->b);
+       ui_browser__hide(&browser->b);
        return key;
 }
 
@@ -797,8 +828,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                        .priv    = &ms,
                        .use_navkeypressed = true,
                },
-               .use_offset = true,
-               .jump_arrows = true,
        };
        int ret = -1;
 
@@ -855,6 +884,12 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
        browser.b.nr_entries = browser.nr_entries;
        browser.b.entries = &notes->src->source,
        browser.b.width += 18; /* Percentage */
+
+       if (annotate_browser__opts.hide_src_code)
+               annotate_browser__init_asm_mode(&browser);
+
+       annotate_browser__update_addr_width(&browser);
+
        ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
        list_for_each_entry_safe(pos, n, &notes->src->source, node) {
                list_del(&pos->node);
@@ -865,3 +900,52 @@ out_free_offsets:
        free(browser.offsets);
        return ret;
 }
+
+#define ANNOTATE_CFG(n) \
+       { .name = #n, .value = &annotate_browser__opts.n, }
+
+/*
+ * Keep the entries sorted, they are bsearch'ed
+ */
+static struct annotate__config {
+       const char *name;
+       bool *value;
+} annotate__configs[] = {
+       ANNOTATE_CFG(hide_src_code),
+       ANNOTATE_CFG(jump_arrows),
+       ANNOTATE_CFG(show_nr_jumps),
+       ANNOTATE_CFG(use_offset),
+};
+
+#undef ANNOTATE_CFG
+
+static int annotate_config__cmp(const void *name, const void *cfgp)
+{
+       const struct annotate__config *cfg = cfgp;
+
+       return strcmp(name, cfg->name);
+}
+
+static int annotate__config(const char *var, const char *value, void *data __used)
+{
+       struct annotate__config *cfg;
+       const char *name;
+
+       if (prefixcmp(var, "annotate.") != 0)
+               return 0;
+
+       name = var + 9;
+       cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
+                     sizeof(struct annotate__config), annotate_config__cmp);
+
+       if (cfg == NULL)
+               return -1;
+
+       *cfg->value = perf_config_bool(name, value);
+       return 0;
+}
+
+void annotate_browser__init(void)
+{
+       perf_config(annotate__config, NULL);
+}
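
The annotate.* option parsing added at the end of this file resolves each key with bsearch() over a table that must stay sorted by name. Below is a minimal standalone sketch of that lookup pattern, not taken from the patch: the perf_config()/prefixcmp() plumbing is replaced by a plain strncmp() and a demo main(), and the option names simply mirror the table above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static struct {
	bool hide_src_code, jump_arrows, show_nr_jumps, use_offset;
} opts;

struct cfg {
	const char *name;
	bool	   *value;
};

/* Keep sorted by name: the bsearch() below depends on it. */
static struct cfg cfgs[] = {
	{ "hide_src_code", &opts.hide_src_code },
	{ "jump_arrows",   &opts.jump_arrows   },
	{ "show_nr_jumps", &opts.show_nr_jumps },
	{ "use_offset",    &opts.use_offset    },
};

static int cfg_cmp(const void *name, const void *cfgp)
{
	return strcmp(name, ((const struct cfg *)cfgp)->name);
}

static int set_option(const char *var, bool value)
{
	struct cfg *cfg;

	if (strncmp(var, "annotate.", 9) != 0)
		return 0;			/* not an annotate.* key, ignore */

	cfg = bsearch(var + 9, cfgs, sizeof(cfgs) / sizeof(cfgs[0]),
		      sizeof(cfgs[0]), cfg_cmp);
	if (cfg == NULL)
		return -1;			/* unknown annotate.* key */

	*cfg->value = value;
	return 0;
}

int main(void)
{
	set_option("annotate.use_offset", true);
	printf("use_offset=%d\n", opts.use_offset);
	return 0;
}
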
index a372a4b026354b4d9f3984bc7fd3b96a5523f95a..53f6697d014e788396b474c6be957893b8fd09ca 100644 (file)
@@ -26,21 +26,21 @@ struct hist_browser {
        bool                 has_symbols;
 };
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
                                const char *ev_name);
 
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
+static void hist_browser__refresh_dimensions(struct hist_browser *browser)
 {
        /* 3 == +/- toggle symbol before actual hist_entry rendering */
-       self->b.width = 3 + (hists__sort_list_width(self->hists) +
+       browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
                             sizeof("[k]"));
 }
 
-static void hist_browser__reset(struct hist_browser *self)
+static void hist_browser__reset(struct hist_browser *browser)
 {
-       self->b.nr_entries = self->hists->nr_entries;
-       hist_browser__refresh_dimensions(self);
-       ui_browser__reset_index(&self->b);
+       browser->b.nr_entries = browser->hists->nr_entries;
+       hist_browser__refresh_dimensions(browser);
+       ui_browser__reset_index(&browser->b);
 }
 
 static char tree__folded_sign(bool unfolded)
@@ -48,32 +48,32 @@ static char tree__folded_sign(bool unfolded)
        return unfolded ? '-' : '+';
 }
 
-static char map_symbol__folded(const struct map_symbol *self)
+static char map_symbol__folded(const struct map_symbol *ms)
 {
-       return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+       return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
 }
 
-static char hist_entry__folded(const struct hist_entry *self)
+static char hist_entry__folded(const struct hist_entry *he)
 {
-       return map_symbol__folded(&self->ms);
+       return map_symbol__folded(&he->ms);
 }
 
-static char callchain_list__folded(const struct callchain_list *self)
+static char callchain_list__folded(const struct callchain_list *cl)
 {
-       return map_symbol__folded(&self->ms);
+       return map_symbol__folded(&cl->ms);
 }
 
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
 {
-       self->unfolded = unfold ? self->has_children : false;
+       ms->unfolded = unfold ? ms->has_children : false;
 }
 
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
 {
        int n = 0;
        struct rb_node *nd;
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                char folded_sign = ' '; /* No children */
@@ -123,23 +123,23 @@ static int callchain__count_rows(struct rb_root *chain)
        return n;
 }
 
-static bool map_symbol__toggle_fold(struct map_symbol *self)
+static bool map_symbol__toggle_fold(struct map_symbol *ms)
 {
-       if (!self)
+       if (!ms)
                return false;
 
-       if (!self->has_children)
+       if (!ms->has_children)
                return false;
 
-       self->unfolded = !self->unfolded;
+       ms->unfolded = !ms->unfolded;
        return true;
 }
 
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
 {
-       struct rb_node *nd = rb_first(&self->rb_root);
+       struct rb_node *nd = rb_first(&node->rb_root);
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                bool first = true;
@@ -158,49 +158,49 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se
        }
 }
 
-static void callchain_node__init_have_children(struct callchain_node *self)
+static void callchain_node__init_have_children(struct callchain_node *node)
 {
        struct callchain_list *chain;
 
-       list_for_each_entry(chain, &self->val, list)
-               chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+       list_for_each_entry(chain, &node->val, list)
+               chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
 
-       callchain_node__init_have_children_rb_tree(self);
+       callchain_node__init_have_children_rb_tree(node);
 }
 
-static void callchain__init_have_children(struct rb_root *self)
+static void callchain__init_have_children(struct rb_root *root)
 {
        struct rb_node *nd;
 
-       for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
                callchain_node__init_have_children(node);
        }
 }
 
-static void hist_entry__init_have_children(struct hist_entry *self)
+static void hist_entry__init_have_children(struct hist_entry *he)
 {
-       if (!self->init_have_children) {
-               self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
-               callchain__init_have_children(&self->sorted_chain);
-               self->init_have_children = true;
+       if (!he->init_have_children) {
+               he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+               callchain__init_have_children(&he->sorted_chain);
+               he->init_have_children = true;
        }
 }
 
-static bool hist_browser__toggle_fold(struct hist_browser *self)
+static bool hist_browser__toggle_fold(struct hist_browser *browser)
 {
-       if (map_symbol__toggle_fold(self->selection)) {
-               struct hist_entry *he = self->he_selection;
+       if (map_symbol__toggle_fold(browser->selection)) {
+               struct hist_entry *he = browser->he_selection;
 
                hist_entry__init_have_children(he);
-               self->hists->nr_entries -= he->nr_rows;
+               browser->hists->nr_entries -= he->nr_rows;
 
                if (he->ms.unfolded)
                        he->nr_rows = callchain__count_rows(&he->sorted_chain);
                else
                        he->nr_rows = 0;
-               self->hists->nr_entries += he->nr_rows;
-               self->b.nr_entries = self->hists->nr_entries;
+               browser->hists->nr_entries += he->nr_rows;
+               browser->b.nr_entries = browser->hists->nr_entries;
 
                return true;
        }
@@ -209,12 +209,12 @@ static bool hist_browser__toggle_fold(struct hist_browser *self)
        return false;
 }
 
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
 {
        int n = 0;
        struct rb_node *nd;
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                bool has_children = false;
@@ -263,37 +263,37 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
        return n;
 }
 
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
 {
-       hist_entry__init_have_children(self);
-       map_symbol__set_folding(&self->ms, unfold);
+       hist_entry__init_have_children(he);
+       map_symbol__set_folding(&he->ms, unfold);
 
-       if (self->ms.has_children) {
-               int n = callchain__set_folding(&self->sorted_chain, unfold);
-               self->nr_rows = unfold ? n : 0;
+       if (he->ms.has_children) {
+               int n = callchain__set_folding(&he->sorted_chain, unfold);
+               he->nr_rows = unfold ? n : 0;
        } else
-               self->nr_rows = 0;
+               he->nr_rows = 0;
 }
 
-static void hists__set_folding(struct hists *self, bool unfold)
+static void hists__set_folding(struct hists *hists, bool unfold)
 {
        struct rb_node *nd;
 
-       self->nr_entries = 0;
+       hists->nr_entries = 0;
 
-       for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
                hist_entry__set_folding(he, unfold);
-               self->nr_entries += 1 + he->nr_rows;
+               hists->nr_entries += 1 + he->nr_rows;
        }
 }
 
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 {
-       hists__set_folding(self->hists, unfold);
-       self->b.nr_entries = self->hists->nr_entries;
+       hists__set_folding(browser->hists, unfold);
+       browser->b.nr_entries = browser->hists->nr_entries;
        /* Go to the start, we may be way after valid entries after a collapse */
-       ui_browser__reset_index(&self->b);
+       ui_browser__reset_index(&browser->b);
 }
 
 static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -305,64 +305,64 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
                "Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
                             void(*timer)(void *arg), void *arg, int delay_secs)
 {
        int key;
        char title[160];
 
-       self->b.entries = &self->hists->entries;
-       self->b.nr_entries = self->hists->nr_entries;
+       browser->b.entries = &browser->hists->entries;
+       browser->b.nr_entries = browser->hists->nr_entries;
 
-       hist_browser__refresh_dimensions(self);
-       hists__browser_title(self->hists, title, sizeof(title), ev_name);
+       hist_browser__refresh_dimensions(browser);
+       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
 
-       if (ui_browser__show(&self->b, title,
+       if (ui_browser__show(&browser->b, title,
                             "Press '?' for help on key bindings") < 0)
                return -1;
 
        while (1) {
-               key = ui_browser__run(&self->b, delay_secs);
+               key = ui_browser__run(&browser->b, delay_secs);
 
                switch (key) {
                case K_TIMER:
                        timer(arg);
-                       ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+                       ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
 
-                       if (self->hists->stats.nr_lost_warned !=
-                           self->hists->stats.nr_events[PERF_RECORD_LOST]) {
-                               self->hists->stats.nr_lost_warned =
-                                       self->hists->stats.nr_events[PERF_RECORD_LOST];
-                               ui_browser__warn_lost_events(&self->b);
+                       if (browser->hists->stats.nr_lost_warned !=
+                           browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+                               browser->hists->stats.nr_lost_warned =
+                                       browser->hists->stats.nr_events[PERF_RECORD_LOST];
+                               ui_browser__warn_lost_events(&browser->b);
                        }
 
-                       hists__browser_title(self->hists, title, sizeof(title), ev_name);
-                       ui_browser__show_title(&self->b, title);
+                       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+                       ui_browser__show_title(&browser->b, title);
                        continue;
                case 'D': { /* Debug */
                        static int seq;
-                       struct hist_entry *h = rb_entry(self->b.top,
+                       struct hist_entry *h = rb_entry(browser->b.top,
                                                        struct hist_entry, rb_node);
                        ui_helpline__pop();
                        ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
-                                          seq++, self->b.nr_entries,
-                                          self->hists->nr_entries,
-                                          self->b.height,
-                                          self->b.index,
-                                          self->b.top_idx,
+                                          seq++, browser->b.nr_entries,
+                                          browser->hists->nr_entries,
+                                          browser->b.height,
+                                          browser->b.index,
+                                          browser->b.top_idx,
                                           h->row_offset, h->nr_rows);
                }
                        break;
                case 'C':
                        /* Collapse the whole world. */
-                       hist_browser__set_folding(self, false);
+                       hist_browser__set_folding(browser, false);
                        break;
                case 'E':
                        /* Expand the whole world. */
-                       hist_browser__set_folding(self, true);
+                       hist_browser__set_folding(browser, true);
                        break;
                case K_ENTER:
-                       if (hist_browser__toggle_fold(self))
+                       if (hist_browser__toggle_fold(browser))
                                break;
                        /* fall thru */
                default:
@@ -370,23 +370,23 @@ static int hist_browser__run(struct hist_browser *self, const char *ev_name,
                }
        }
 out:
-       ui_browser__hide(&self->b);
+       ui_browser__hide(&browser->b);
        return key;
 }
 
-static char *callchain_list__sym_name(struct callchain_list *self,
+static char *callchain_list__sym_name(struct callchain_list *cl,
                                      char *bf, size_t bfsize)
 {
-       if (self->ms.sym)
-               return self->ms.sym->name;
+       if (cl->ms.sym)
+               return cl->ms.sym->name;
 
-       snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+       snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
        return bf;
 }
 
 #define LEVEL_OFFSET_STEP 3
 
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
                                                     struct callchain_node *chain_node,
                                                     u64 total, int level,
                                                     unsigned short row,
@@ -444,21 +444,21 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                        }
 
                        color = HE_COLORSET_NORMAL;
-                       width = self->b.width - (offset + extra_offset + 2);
-                       if (ui_browser__is_current_entry(&self->b, row)) {
-                               self->selection = &chain->ms;
+                       width = browser->b.width - (offset + extra_offset + 2);
+                       if (ui_browser__is_current_entry(&browser->b, row)) {
+                               browser->selection = &chain->ms;
                                color = HE_COLORSET_SELECTED;
                                *is_current_entry = true;
                        }
 
-                       ui_browser__set_color(&self->b, color);
-                       ui_browser__gotorc(&self->b, row, 0);
+                       ui_browser__set_color(&browser->b, color);
+                       ui_browser__gotorc(&browser->b, row, 0);
                        slsmg_write_nstring(" ", offset + extra_offset);
                        slsmg_printf("%c ", folded_sign);
                        slsmg_write_nstring(str, width);
                        free(alloc_str);
 
-                       if (++row == self->b.height)
+                       if (++row == browser->b.height)
                                goto out;
 do_next:
                        if (folded_sign == '+')
@@ -467,11 +467,11 @@ do_next:
 
                if (folded_sign == '-') {
                        const int new_level = level + (extra_offset ? 2 : 1);
-                       row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+                       row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
                                                                         new_level, row, row_offset,
                                                                         is_current_entry);
                }
-               if (row == self->b.height)
+               if (row == browser->b.height)
                        goto out;
                node = next;
        }
@@ -479,7 +479,7 @@ out:
        return row - first_row;
 }
 
-static int hist_browser__show_callchain_node(struct hist_browser *self,
+static int hist_browser__show_callchain_node(struct hist_browser *browser,
                                             struct callchain_node *node,
                                             int level, unsigned short row,
                                             off_t *row_offset,
@@ -488,7 +488,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
        struct callchain_list *chain;
        int first_row = row,
             offset = level * LEVEL_OFFSET_STEP,
-            width = self->b.width - offset;
+            width = browser->b.width - offset;
        char folded_sign = ' ';
 
        list_for_each_entry(chain, &node->val, list) {
@@ -503,26 +503,26 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
                }
 
                color = HE_COLORSET_NORMAL;
-               if (ui_browser__is_current_entry(&self->b, row)) {
-                       self->selection = &chain->ms;
+               if (ui_browser__is_current_entry(&browser->b, row)) {
+                       browser->selection = &chain->ms;
                        color = HE_COLORSET_SELECTED;
                        *is_current_entry = true;
                }
 
                s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
-               ui_browser__gotorc(&self->b, row, 0);
-               ui_browser__set_color(&self->b, color);
+               ui_browser__gotorc(&browser->b, row, 0);
+               ui_browser__set_color(&browser->b, color);
                slsmg_write_nstring(" ", offset);
                slsmg_printf("%c ", folded_sign);
                slsmg_write_nstring(s, width - 2);
 
-               if (++row == self->b.height)
+               if (++row == browser->b.height)
                        goto out;
        }
 
        if (folded_sign == '-')
-               row += hist_browser__show_callchain_node_rb_tree(self, node,
-                                                                self->hists->stats.total_period,
+               row += hist_browser__show_callchain_node_rb_tree(browser, node,
+                                                                browser->hists->stats.total_period,
                                                                 level + 1, row,
                                                                 row_offset,
                                                                 is_current_entry);
@@ -530,7 +530,7 @@ out:
        return row - first_row;
 }
 
-static int hist_browser__show_callchain(struct hist_browser *self,
+static int hist_browser__show_callchain(struct hist_browser *browser,
                                        struct rb_root *chain,
                                        int level, unsigned short row,
                                        off_t *row_offset,
@@ -542,31 +542,31 @@ static int hist_browser__show_callchain(struct hist_browser *self,
        for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
                struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
 
-               row += hist_browser__show_callchain_node(self, node, level,
+               row += hist_browser__show_callchain_node(browser, node, level,
                                                         row, row_offset,
                                                         is_current_entry);
-               if (row == self->b.height)
+               if (row == browser->b.height)
                        break;
        }
 
        return row - first_row;
 }
 
-static int hist_browser__show_entry(struct hist_browser *self,
+static int hist_browser__show_entry(struct hist_browser *browser,
                                    struct hist_entry *entry,
                                    unsigned short row)
 {
        char s[256];
        double percent;
        int printed = 0;
-       int width = self->b.width - 6; /* The percentage */
+       int width = browser->b.width - 6; /* The percentage */
        char folded_sign = ' ';
-       bool current_entry = ui_browser__is_current_entry(&self->b, row);
+       bool current_entry = ui_browser__is_current_entry(&browser->b, row);
        off_t row_offset = entry->row_offset;
 
        if (current_entry) {
-               self->he_selection = entry;
-               self->selection = &entry->ms;
+               browser->he_selection = entry;
+               browser->selection = &entry->ms;
        }
 
        if (symbol_conf.use_callchain) {
@@ -575,11 +575,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
        }
 
        if (row_offset == 0) {
-               hist_entry__snprintf(entry, s, sizeof(s), self->hists);
-               percent = (entry->period * 100.0) / self->hists->stats.total_period;
+               hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
+               percent = (entry->period * 100.0) / browser->hists->stats.total_period;
 
-               ui_browser__set_percent_color(&self->b, percent, current_entry);
-               ui_browser__gotorc(&self->b, row, 0);
+               ui_browser__set_percent_color(&browser->b, percent, current_entry);
+               ui_browser__gotorc(&browser->b, row, 0);
                if (symbol_conf.use_callchain) {
                        slsmg_printf("%c ", folded_sign);
                        width -= 2;
@@ -588,11 +588,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
                slsmg_printf(" %5.2f%%", percent);
 
                /* The scroll bar isn't being used */
-               if (!self->b.navkeypressed)
+               if (!browser->b.navkeypressed)
                        width += 1;
 
-               if (!current_entry || !self->b.navkeypressed)
-                       ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+               if (!current_entry || !browser->b.navkeypressed)
+                       ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
 
                if (symbol_conf.show_nr_samples) {
                        slsmg_printf(" %11u", entry->nr_events);
@@ -610,12 +610,12 @@ static int hist_browser__show_entry(struct hist_browser *self,
        } else
                --row_offset;
 
-       if (folded_sign == '-' && row != self->b.height) {
-               printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+       if (folded_sign == '-' && row != browser->b.height) {
+               printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
                                                        1, row, &row_offset,
                                                        &current_entry);
                if (current_entry)
-                       self->he_selection = entry;
+                       browser->he_selection = entry;
        }
 
        return printed;
@@ -631,22 +631,22 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
        }
 }
 
-static unsigned int hist_browser__refresh(struct ui_browser *self)
+static unsigned int hist_browser__refresh(struct ui_browser *browser)
 {
        unsigned row = 0;
        struct rb_node *nd;
-       struct hist_browser *hb = container_of(self, struct hist_browser, b);
+       struct hist_browser *hb = container_of(browser, struct hist_browser, b);
 
-       ui_browser__hists_init_top(self);
+       ui_browser__hists_init_top(browser);
 
-       for (nd = self->top; nd; nd = rb_next(nd)) {
+       for (nd = browser->top; nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 
                if (h->filtered)
                        continue;
 
                row += hist_browser__show_entry(hb, h, row);
-               if (row == self->height)
+               if (row == browser->height)
                        break;
        }
 
@@ -679,27 +679,27 @@ static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
        return NULL;
 }
 
-static void ui_browser__hists_seek(struct ui_browser *self,
+static void ui_browser__hists_seek(struct ui_browser *browser,
                                   off_t offset, int whence)
 {
        struct hist_entry *h;
        struct rb_node *nd;
        bool first = true;
 
-       if (self->nr_entries == 0)
+       if (browser->nr_entries == 0)
                return;
 
-       ui_browser__hists_init_top(self);
+       ui_browser__hists_init_top(browser);
 
        switch (whence) {
        case SEEK_SET:
-               nd = hists__filter_entries(rb_first(self->entries));
+               nd = hists__filter_entries(rb_first(browser->entries));
                break;
        case SEEK_CUR:
-               nd = self->top;
+               nd = browser->top;
                goto do_offset;
        case SEEK_END:
-               nd = hists__filter_prev_entries(rb_last(self->entries));
+               nd = hists__filter_prev_entries(rb_last(browser->entries));
                first = false;
                break;
        default:
@@ -710,7 +710,7 @@ static void ui_browser__hists_seek(struct ui_browser *self,
         * Moves not relative to the first visible entry invalidates its
         * row_offset:
         */
-       h = rb_entry(self->top, struct hist_entry, rb_node);
+       h = rb_entry(browser->top, struct hist_entry, rb_node);
        h->row_offset = 0;
 
        /*
@@ -738,7 +738,7 @@ do_offset:
                                } else {
                                        h->row_offset += offset;
                                        offset = 0;
-                                       self->top = nd;
+                                       browser->top = nd;
                                        break;
                                }
                        }
@@ -746,7 +746,7 @@ do_offset:
                        if (nd == NULL)
                                break;
                        --offset;
-                       self->top = nd;
+                       browser->top = nd;
                } while (offset != 0);
        } else if (offset < 0) {
                while (1) {
@@ -759,7 +759,7 @@ do_offset:
                                        } else {
                                                h->row_offset += offset;
                                                offset = 0;
-                                               self->top = nd;
+                                               browser->top = nd;
                                                break;
                                        }
                                } else {
@@ -769,7 +769,7 @@ do_offset:
                                        } else {
                                                h->row_offset = h->nr_rows + offset;
                                                offset = 0;
-                                               self->top = nd;
+                                               browser->top = nd;
                                                break;
                                        }
                                }
@@ -779,7 +779,7 @@ do_offset:
                        if (nd == NULL)
                                break;
                        ++offset;
-                       self->top = nd;
+                       browser->top = nd;
                        if (offset == 0) {
                                /*
                                 * Last unfiltered hist_entry, check if it is
@@ -794,7 +794,7 @@ do_offset:
                        first = false;
                }
        } else {
-               self->top = nd;
+               browser->top = nd;
                h = rb_entry(nd, struct hist_entry, rb_node);
                h->row_offset = 0;
        }
@@ -802,46 +802,46 @@ do_offset:
 
 static struct hist_browser *hist_browser__new(struct hists *hists)
 {
-       struct hist_browser *self = zalloc(sizeof(*self));
+       struct hist_browser *browser = zalloc(sizeof(*browser));
 
-       if (self) {
-               self->hists = hists;
-               self->b.refresh = hist_browser__refresh;
-               self->b.seek = ui_browser__hists_seek;
-               self->b.use_navkeypressed = true;
+       if (browser) {
+               browser->hists = hists;
+               browser->b.refresh = hist_browser__refresh;
+               browser->b.seek = ui_browser__hists_seek;
+               browser->b.use_navkeypressed = true;
                if (sort__branch_mode == 1)
-                       self->has_symbols = sort_sym_from.list.next != NULL;
+                       browser->has_symbols = sort_sym_from.list.next != NULL;
                else
-                       self->has_symbols = sort_sym.list.next != NULL;
+                       browser->has_symbols = sort_sym.list.next != NULL;
        }
 
-       return self;
+       return browser;
 }
 
-static void hist_browser__delete(struct hist_browser *self)
+static void hist_browser__delete(struct hist_browser *browser)
 {
-       free(self);
+       free(browser);
 }
 
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
 {
-       return self->he_selection;
+       return browser->he_selection;
 }
 
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
 {
-       return self->he_selection->thread;
+       return browser->he_selection->thread;
 }
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
                                const char *ev_name)
 {
        char unit;
        int printed;
-       const struct dso *dso = self->dso_filter;
-       const struct thread *thread = self->thread_filter;
-       unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
-       u64 nr_events = self->stats.total_period;
+       const struct dso *dso = hists->dso_filter;
+       const struct thread *thread = hists->thread_filter;
+       unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+       u64 nr_events = hists->stats.total_period;
 
        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
@@ -849,9 +849,9 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
                           nr_samples, unit, ev_name, nr_events);
 
 
-       if (self->uid_filter_str)
+       if (hists->uid_filter_str)
                printed += snprintf(bf + printed, size - printed,
-                                   ", UID: %s", self->uid_filter_str);
+                                   ", UID: %s", hists->uid_filter_str);
        if (thread)
                printed += scnprintf(bf + printed, size - printed,
                                    ", Thread: %s(%d)",
@@ -879,8 +879,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    void(*timer)(void *arg), void *arg,
                                    int delay_secs)
 {
-       struct hists *self = &evsel->hists;
-       struct hist_browser *browser = hist_browser__new(self);
+       struct hists *hists = &evsel->hists;
+       struct hist_browser *browser = hist_browser__new(hists);
        struct branch_info *bi;
        struct pstack *fstack;
        char *options[16];
@@ -946,8 +946,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "Please enter the name of symbol you want to see",
                                        buf, "ENTER: OK, ESC: Cancel",
                                        delay_secs * 2) == K_ENTER) {
-                               self->symbol_filter_str = *buf ? buf : NULL;
-                               hists__filter_by_symbol(self);
+                               hists->symbol_filter_str = *buf ? buf : NULL;
+                               hists__filter_by_symbol(hists);
                                hist_browser__reset(browser);
                        }
                        continue;
@@ -1128,7 +1128,7 @@ zoom_out_dso:
                                sort_dso.elide = true;
                                pstack__push(fstack, &browser->hists->dso_filter);
                        }
-                       hists__filter_by_dso(self);
+                       hists__filter_by_dso(hists);
                        hist_browser__reset(browser);
                } else if (choice == zoom_thread) {
 zoom_thread:
@@ -1146,7 +1146,7 @@ zoom_out_thread:
                                sort_thread.elide = true;
                                pstack__push(fstack, &browser->hists->thread_filter);
                        }
-                       hists__filter_by_thread(self);
+                       hists__filter_by_thread(hists);
                        hist_browser__reset(browser);
                }
        }
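
hist_browser__toggle_fold() above keeps the browser-wide entry count consistent by subtracting an entry's previous child-row contribution, recomputing it, and adding the new value back. A small self-contained sketch of that bookkeeping, using hypothetical types rather than the perf structures:

#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool unfolded;
	int  nr_children;	/* rows contributed when unfolded */
	int  nr_rows;		/* rows currently contributed */
};

/* Toggle one entry and keep the browser-wide total in sync. */
static void toggle_fold(struct entry *e, int *total_rows)
{
	*total_rows -= e->nr_rows;		/* drop the old contribution */

	e->unfolded = !e->unfolded;
	e->nr_rows = e->unfolded ? e->nr_children : 0;

	*total_rows += e->nr_rows;		/* add the new contribution */
}

int main(void)
{
	struct entry e = { .unfolded = false, .nr_children = 3, .nr_rows = 0 };
	int total = 10;				/* ten top-level rows, all folded */

	toggle_fold(&e, &total);
	printf("total after unfold: %d\n", total);	/* 13 */
	toggle_fold(&e, &total);
	printf("total after fold:   %d\n", total);	/* 10 */
	return 0;
}
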
index 9f5f888f73e30723d5f9c0fd8281345f38e0635e..791fb15ce3507c2d42d695be12c1f87a16affa0f 100644 (file)
@@ -22,6 +22,7 @@ void setup_browser(bool fallback_to_pager)
                        break;
                /* fall through */
        default:
+               use_browser = 0;
                if (fallback_to_pager)
                        setup_pager();
                break;
index ad73300f7bac6d1b11e4db0f1e121e0e26b0fb0c..95264f30417903af6f9a91aab42d757bfdf42d5f 100755 (executable)
@@ -12,7 +12,7 @@ LF='
 # First check if there is a .git to get the version from git describe
 # otherwise try to get the version from the kernel makefile
 if test -d ../../.git -o -f ../../.git &&
-       VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+       VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) &&
        case "$VN" in
        *$LF*) (exit 1) ;;
        v[0-9]*)
index 9f7106a8d9a48cb6f9600beac10bfa3a38bd9be5..3a6bff47614f788eae96a6ea7c7ed64759ec465a 100644 (file)
@@ -18,6 +18,8 @@
 #include "util.h"
 #include "callchain.h"
 
+__thread struct callchain_cursor callchain_cursor;
+
 bool ip_callchain__valid(struct ip_callchain *chain,
                         const union perf_event *event)
 {
index 7f9c0f1ae3a9aad6f6a845299f74d1340ece754f..3bdb407f9cd9f8b9ed9e6340316c4e8237942f5c 100644 (file)
@@ -76,6 +76,8 @@ struct callchain_cursor {
        struct callchain_cursor_node    *curr;
 };
 
+extern __thread struct callchain_cursor callchain_cursor;
+
 static inline void callchain_init(struct callchain_root *root)
 {
        INIT_LIST_HEAD(&root->node.siblings);
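
The callchain cursor becomes a per-thread (__thread) global here: declared extern in the header and defined exactly once in callchain.c. A minimal sketch of that thread-local pattern with hypothetical names (struct cursor / cur stand in for callchain_cursor); build with -pthread:

#include <pthread.h>
#include <stdio.h>

struct cursor { int pos; };

/* In a real tree the extern declaration lives in a shared header ... */
extern __thread struct cursor cur;
/* ... and a single .c file provides the definition. */
__thread struct cursor cur;

static void *worker(void *arg)
{
	cur.pos = (int)(long)arg;	/* each thread mutates its own copy */
	printf("thread %ld sees pos=%d\n", (long)arg, cur.pos);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1L);
	pthread_create(&b, NULL, worker, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
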
index 0deac6a14b652df87c998b38203b0ab003f541ed..6faa3a18bfbd8514001e2d2dd25a9f9a52ce7910 100644 (file)
@@ -120,7 +120,7 @@ static char *parse_value(void)
 
 static inline int iskeychar(int c)
 {
-       return isalnum(c) || c == '-';
+       return isalnum(c) || c == '-' || c == '_';
 }
 
 static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
index 4ac5f5ae4ce903a9284d6b4f105493da2bc454c5..7400fb3fc50c91910a51eb3dc6a679ca6e4a05de 100644 (file)
@@ -159,6 +159,17 @@ out_delete_partial_list:
        return -1;
 }
 
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+                                    struct perf_event_attr *attrs, size_t nr_attrs)
+{
+       size_t i;
+
+       for (i = 0; i < nr_attrs; i++)
+               event_attr_init(attrs + i);
+
+       return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+}
+
 static int trace_event__id(const char *evname)
 {
        char *filename, *colon;
@@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
-                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
+                               ioctl(FD(pos, cpu, thread),
+                                     PERF_EVENT_IOC_DISABLE, 0);
                }
        }
 }
@@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist)
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
-                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+                               ioctl(FD(pos, cpu, thread),
+                                     PERF_EVENT_IOC_ENABLE, 0);
                }
        }
 }
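The new __perf_evlist__add_default_attrs() above runs event_attr_init() on every element of a fixed attribute array before handing it to perf_evlist__add_attrs(); a companion macro added to evlist.h further down supplies the array length via ARRAY_SIZE(). A hedged sketch of the same pattern, with made-up names (my_attr, add_all, add_all_array):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

struct my_attr { int type; int config; };

static void attr_init(struct my_attr *a)        /* stand-in for event_attr_init() */
{
        (void)a;                                /* the real helper fills in defaults */
}

static int add_all(struct my_attr *attrs, size_t nr)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                attr_init(attrs + i);
                printf("added type=%d config=%d\n", attrs[i].type, attrs[i].config);
        }
        return 0;
}

/* Callers never spell the length out by hand: */
#define add_all_array(array)    add_all(array, ARRAY_SIZE(array))

int main(void)
{
        struct my_attr defaults[] = { { 0, 0 }, { 0, 1 }, { 1, 4 } };

        return add_all_array(defaults);         /* nr = 3, computed at compile time */
}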
index 58abb63ac13a42ac14f3014ba5977f4635797592..989bee9624c23c66e002b7a245c89c7114f3b219 100644 (file)
@@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
 int perf_evlist__add_default(struct perf_evlist *evlist);
 int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs);
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+                                    struct perf_event_attr *attrs, size_t nr_attrs);
 int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[], size_t nr_tracepoints);
 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
@@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
 
 #define perf_evlist__add_attrs_array(evlist, array) \
        perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+#define perf_evlist__add_default_attrs(evlist, array) \
+       __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
 
 #define perf_evlist__add_tracepoints_array(evlist, array) \
        perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
index 57e4ce57bbcc03faf7245f40a0ba3e20b43851d8..9f6cebd798eed5ee44ea4510305b93b4f2e3fd6b 100644 (file)
@@ -15,6 +15,7 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
+#include "../../include/linux/perf_event.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -64,6 +65,95 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
        return evsel;
 }
 
+static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+       "cycles",
+       "instructions",
+       "cache-references",
+       "cache-misses",
+       "branches",
+       "branch-misses",
+       "bus-cycles",
+       "stalled-cycles-frontend",
+       "stalled-cycles-backend",
+       "ref-cycles",
+};
+
+const char *__perf_evsel__hw_name(u64 config)
+{
+       if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+               return perf_evsel__hw_names[config];
+
+       return "unknown-hardware";
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int colon = 0;
+       struct perf_event_attr *attr = &evsel->attr;
+       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
+       bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod)        do {                                    \
+               if (!attr->exclude_##context) {                         \
+                       if (!colon) colon = r++;                        \
+                       r += scnprintf(bf + r, size - r, "%c", mod);    \
+               } } while(0)
+
+       if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+               MOD_PRINT(kernel, 'k');
+               MOD_PRINT(user, 'u');
+               MOD_PRINT(hv, 'h');
+               exclude_guest_default = true;
+       }
+
+       if (attr->precise_ip) {
+               if (!colon)
+                       colon = r++;
+               r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+               exclude_guest_default = true;
+       }
+
+       if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+               MOD_PRINT(host, 'H');
+               MOD_PRINT(guest, 'G');
+       }
+#undef MOD_PRINT
+       if (colon)
+               bf[colon] = ':';
+       return r;
+}
+
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret;
+
+       switch (evsel->attr.type) {
+       case PERF_TYPE_RAW:
+               ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+               break;
+
+       case PERF_TYPE_HARDWARE:
+               ret = perf_evsel__hw_name(evsel, bf, size);
+               break;
+       default:
+               /*
+                * FIXME
+                *
+                * This is the minimal perf_evsel__name so that we can
+                * reconstruct event names taking into account event modifiers.
+                *
+                * The old event_name uses it now for raw and hw events, so that
+                * we don't drag all the parsing stuff into the python binding.
+                *
+                * On the next devel cycle the rest of the event naming will be
+                * brought here.
+                */
+               return 0;
+       }
+
+       return ret;
+}
+
 void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
                        struct perf_evsel *first)
 {
@@ -404,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
-                                      struct perf_sample *sample)
+                                      struct perf_sample *sample,
+                                      bool swapped)
 {
        const u64 *array = event->sample.array;
+       union u64_swap u;
 
        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;
 
        if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               sample->cpu = *p;
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+               }
+
+               sample->cpu = u.val32[0];
                array--;
        }
 
@@ -433,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               sample->pid = p[0];
-               sample->tid = p[1];
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
+
+               sample->pid = u.val32[0];
+               sample->tid = u.val32[1];
        }
 
        return 0;
@@ -472,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
                        return 0;
-               return perf_event__parse_id_sample(event, type, data);
+               return perf_event__parse_id_sample(event, type, data, swapped);
        }
 
        array = event->sample.array;
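The perf_event__parse_id_sample() changes above use a union of one u64 and two u32s to repair fields that were written as a packed u32 pair but byte-swapped as one u64. A self-contained demonstration; it assumes a little-endian host reading data recorded on a big-endian machine, and the union simply mirrors perf's union u64_swap:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union u64_swap {
        uint64_t val64;
        uint32_t val32[2];
};

int main(void)
{
        /* Eight trailer bytes as a big-endian producer wrote them:
         * pid = 0x1234 followed by tid = 0x5678. */
        const unsigned char file_bytes[8] = { 0x00, 0x00, 0x12, 0x34,
                                              0x00, 0x00, 0x56, 0x78 };
        union u64_swap u;

        memcpy(&u.val64, file_bytes, sizeof(u.val64));
        u.val64 = bswap_64(u.val64);            /* the generic whole-u64 swap pass */

        /* Recovery, as in the swapped case above: */
        u.val64 = bswap_64(u.val64);            /* undo the u64 swap ...            */
        u.val32[0] = bswap_32(u.val32[0]);      /* ... then swap each u32 by itself */
        u.val32[1] = bswap_32(u.val32[1]);

        printf("pid=0x%x tid=0x%x\n", u.val32[0], u.val32[1]);  /* 0x1234 0x5678 */
        return 0;
}

Undoing the 64-bit swap restores the on-file byte order; only then can each u32 be converted on its own.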
index 3d6b3e4cb66bb9bfbb32b75958ef1f23a4880163..4ba8b564e6f47f039652ebac739d9d899d5881f5 100644 (file)
@@ -83,6 +83,9 @@ void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts,
                        struct perf_evsel *first);
 
+const char *__perf_evsel__hw_name(u64 config);
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
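perf_evsel__hw_name(), declared above and defined in the evsel.c hunk earlier, builds strings such as "cycles:ku" by reserving one byte for the ':' when the first modifier appears and patching it in at the end. A simplified sketch of that trick; the boolean flags here are invented, while the real code keys off the perf_event_attr exclude_* bits:

#include <stdbool.h>
#include <stdio.h>

static int hw_name(char *bf, size_t size, bool count_kernel, bool count_user)
{
        int colon = 0;
        int r = snprintf(bf, size, "%s", "cycles");

        if (count_kernel) {
                if (!colon)
                        colon = r++;            /* reserve a byte for ':' */
                r += snprintf(bf + r, size - r, "%c", 'k');
        }
        if (count_user) {
                if (!colon)
                        colon = r++;
                r += snprintf(bf + r, size - r, "%c", 'u');
        }
        if (colon)
                bf[colon] = ':';                /* patch the separator in last */
        return r;
}

int main(void)
{
        char bf[32];

        hw_name(bf, sizeof(bf), true, false);
        printf("%s\n", bf);                     /* cycles:k */
        hw_name(bf, sizeof(bf), true, true);
        printf("%s\n", bf);                     /* cycles:ku */
        return 0;
}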
index 2dd5edf161b731ae1be22b9530e25e59fd444f47..e909d43cf5422333e4d47d2b412c1f3fa0b636af 100644 (file)
@@ -1942,7 +1942,6 @@ int perf_file_header__read(struct perf_file_header *header,
                else
                        return -1;
        } else if (ph->needs_swap) {
-               unsigned int i;
                /*
                 * feature bitmap is declared as an array of unsigned longs --
                 * not good since its size can differ between the host that
@@ -1958,14 +1957,17 @@ int perf_file_header__read(struct perf_file_header *header,
                 * file), punt and fallback to the original behavior --
                 * clearing all feature bits and setting buildid.
                 */
-               for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
-                       header->adds_features[i] = bswap_64(header->adds_features[i]);
+               mem_bswap_64(&header->adds_features,
+                           BITS_TO_U64(HEADER_FEAT_BITS));
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
-                       for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
-                               header->adds_features[i] = bswap_64(header->adds_features[i]);
-                               header->adds_features[i] = bswap_32(header->adds_features[i]);
-                       }
+                       /* unswap as u64 */
+                       mem_bswap_64(&header->adds_features,
+                                   BITS_TO_U64(HEADER_FEAT_BITS));
+
+                       /* unswap as u32 */
+                       mem_bswap_32(&header->adds_features,
+                                   BITS_TO_U32(HEADER_FEAT_BITS));
                }
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
@@ -2091,6 +2093,35 @@ static int read_attr(int fd, struct perf_header *ph,
        return ret <= 0 ? -1 : 0;
 }
 
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
+{
+       struct event_format *event = trace_find_event(evsel->attr.config);
+       char bf[128];
+
+       if (event == NULL)
+               return -1;
+
+       snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+       evsel->name = strdup(bf);
+       if (evsel->name == NULL)
+               return -1;
+
+       return 0;
+}
+
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
+{
+       struct perf_evsel *pos;
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+                   perf_evsel__set_tracepoint_name(pos))
+                       return -1;
+       }
+
+       return 0;
+}
+
 int perf_session__read_header(struct perf_session *session, int fd)
 {
        struct perf_header *header = &session->header;
@@ -2172,6 +2203,9 @@ int perf_session__read_header(struct perf_session *session, int fd)
 
        lseek(fd, header->data_offset, SEEK_SET);
 
+       if (perf_evlist__set_tracepoint_names(session->evlist))
+               goto out_delete_evlist;
+
        header->frozen = 1;
        return 0;
 out_errno:
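The feature-bitmap repair above byte-swaps header->adds_features in fixed-size units: BITS_TO_U64() and BITS_TO_U32(), added to bitops.h further down in this diff, just round a bit count up to whole 64-bit or 32-bit words. A quick worked example; the 256-bit size is assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE           8
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t))

int main(void)
{
        printf("256 bits -> %zu u64 words\n", BITS_TO_U64(256));        /* 4 */
        printf("256 bits -> %zu u32 words\n", BITS_TO_U32(256));        /* 8 */
        printf("100 bits -> %zu u64 words\n", BITS_TO_U64(100));        /* 2, rounded up */
        return 0;
}

Swapping the bitmap as N u64 words (and, when needed, again as 2N u32 words) gives the same result on any host, whereas the old loop swapped BITS_TO_LONGS() entries and so depended on sizeof(long).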
index 1293b5ebea4dd31327a9e36402577779ec41626b..514e2a4b367d6d53838d9ac726a063b0ba3bf5e8 100644 (file)
@@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he)
  * collapse the histogram
  */
 
-static bool hists__collapse_insert_entry(struct hists *hists,
+static bool hists__collapse_insert_entry(struct hists *hists __used,
                                         struct rb_root *root,
                                         struct hist_entry *he)
 {
@@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists,
                        iter->period += he->period;
                        iter->nr_events += he->nr_events;
                        if (symbol_conf.use_callchain) {
-                               callchain_cursor_reset(&hists->callchain_cursor);
-                               callchain_merge(&hists->callchain_cursor, iter->callchain,
+                               callchain_cursor_reset(&callchain_cursor);
+                               callchain_merge(&callchain_cursor,
+                                               iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
index cfc64e293f90b6f3e065d42379da8bb00302616b..34bb556d62191a1300b295011be6210f9633fcf7 100644 (file)
@@ -67,8 +67,6 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
-       /* Best would be to reuse the session callchain cursor */
-       struct callchain_cursor callchain_cursor;
 };
 
 struct hist_entry *__hists__add_entry(struct hists *self,
index f1584833bd2296aa2befe02950e5d49c7a93741f..587a230d2075a3ab1451cb46d34f651e20ffc486 100644 (file)
@@ -8,6 +8,8 @@
 #define BITS_PER_LONG __WORDSIZE
 #define BITS_PER_BYTE           8
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 
 #define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
index 35ae56864e4f59625369941886b196438f107c99..a1f4e3669142630aa641d1a34b0f53673b6b67b0 100644 (file)
@@ -669,25 +669,26 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
 {
        char path[PATH_MAX];
-       const char *root_dir;
+       const char *root_dir = "";
        struct machine *machine = machines__find(self, pid);
 
-       if (!machine || machine->pid != pid) {
-               if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
-                       root_dir = "";
-               else {
-                       if (!symbol_conf.guestmount)
-                               goto out;
-                       sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
-                       if (access(path, R_OK)) {
-                               pr_err("Can't access file %s\n", path);
-                               goto out;
-                       }
-                       root_dir = path;
+       if (machine && (machine->pid == pid))
+               goto out;
+
+       if ((pid != HOST_KERNEL_ID) &&
+           (pid != DEFAULT_GUEST_KERNEL_ID) &&
+           (symbol_conf.guestmount)) {
+               sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+               if (access(path, R_OK)) {
+                       pr_err("Can't access file %s\n", path);
+                       machine = NULL;
+                       goto out;
                }
-               machine = machines__add(self, pid, root_dir);
+               root_dir = path;
        }
 
+       machine = machines__add(self, pid, root_dir);
+
 out:
        return machine;
 }
index 1915de20dcacf28e3dd037ca5b5f462577b561b5..3322b8446e891af7d420f0eb18e2232db51e8621 100644 (file)
@@ -57,6 +57,10 @@ void setup_pager(void)
        }
        if (!pager)
                pager = getenv("PAGER");
+       if (!pager) {
+               if (!access("/usr/bin/pager", X_OK))
+                       pager = "/usr/bin/pager";
+       }
        if (!pager)
                pager = "less";
        else if (!*pager || !strcmp(pager, "cat"))
index fac7d59309b83698cf22829936e86e1c511eb31e..05dbc8b3c767217ceb3204c7be46f5aedbb205c4 100644 (file)
@@ -62,19 +62,6 @@ static struct event_symbol event_symbols[] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
-       "cycles",
-       "instructions",
-       "cache-references",
-       "cache-misses",
-       "branches",
-       "branch-misses",
-       "bus-cycles",
-       "stalled-cycles-frontend",
-       "stalled-cycles-backend",
-       "ref-cycles",
-};
-
 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
@@ -300,6 +287,16 @@ const char *event_name(struct perf_evsel *evsel)
        u64 config = evsel->attr.config;
        int type = evsel->attr.type;
 
+       if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
+               /*
+                * XXX minimal fix, see the comment in perf_evsel__name(); this static buffer
+                * will go away together with event_name in the next devel cycle.
+                */
+               static char bf[128];
+               perf_evsel__name(evsel, bf, sizeof(bf));
+               return bf;
+       }
+
        if (evsel->name)
                return evsel->name;
 
@@ -317,9 +314,7 @@ const char *__event_name(int type, u64 config)
 
        switch (type) {
        case PERF_TYPE_HARDWARE:
-               if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
-                       return hw_event_names[config];
-               return "unknown-hardware";
+               return __perf_evsel__hw_name(config);
 
        case PERF_TYPE_HW_CACHE: {
                u8 cache_type, cache_op, cache_result;
index 59dccc98b5540284373ead1b43a74eb81a7a856d..0dda25d82d06ff81d55be2eb6529e970e4a3cd07 100644 (file)
@@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist)
 
 error:
        if (kfd >= 0) {
-               if (namelist)
-                       strlist__delete(namelist);
-
+               strlist__delete(namelist);
                close(kfd);
        }
 
        if (ufd >= 0) {
-               if (unamelist)
-                       strlist__delete(unamelist);
-
+               strlist__delete(unamelist);
                close(ufd);
        }
 
index 93d355d2710989d784f5a796e02f58c2d4938888..56142d0fb8d79bfdc1bd21cd7ac24f0ded4627bf 100644 (file)
@@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
        return bi;
 }
 
-int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+int machine__resolve_callchain(struct machine *self,
+                              struct perf_evsel *evsel __used,
                               struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent)
@@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
        unsigned int i;
        int err;
 
-       callchain_cursor_reset(&evsel->hists.callchain_cursor);
+       callchain_cursor_reset(&callchain_cursor);
+
+       if (chain->nr > PERF_MAX_STACK_DEPTH) {
+               pr_warning("corrupted callchain. skipping...\n");
+               return 0;
+       }
 
        for (i = 0; i < chain->nr; i++) {
                u64 ip;
@@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;        break;
                        default:
-                               break;
+                               pr_debug("invalid callchain context: "
+                                        "%"PRId64"\n", (s64) ip);
+                               /*
+                                * It seems the callchain is corrupted.
+                                * Discard all.
+                                */
+                               callchain_cursor_reset(&callchain_cursor);
+                               return 0;
                        }
                        continue;
                }
@@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
                                break;
                }
 
-               err = callchain_cursor_append(&evsel->hists.callchain_cursor,
+               err = callchain_cursor_append(&callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
@@ -429,6 +442,16 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
                        tool->finished_round = process_finished_round_stub;
        }
 }
+void mem_bswap_32(void *src, int byte_size)
+{
+       u32 *m = src;
+       while (byte_size > 0) {
+               *m = bswap_32(*m);
+               byte_size -= sizeof(u32);
+               ++m;
+       }
+}
 
 void mem_bswap_64(void *src, int byte_size)
 {
@@ -441,37 +464,65 @@ void mem_bswap_64(void *src, int byte_size)
        }
 }
 
-static void perf_event__all64_swap(union perf_event *event)
+static void swap_sample_id_all(union perf_event *event, void *data)
+{
+       void *end = (void *) event + event->header.size;
+       int size = end - data;
+
+       BUG_ON(size % sizeof(u64));
+       mem_bswap_64(data, size);
+}
+
+static void perf_event__all64_swap(union perf_event *event,
+                                  bool sample_id_all __used)
 {
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
 }
 
-static void perf_event__comm_swap(union perf_event *event)
+static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
 {
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);
+
+       if (sample_id_all) {
+               void *data = &event->comm.comm;
+
+               data += ALIGN(strlen(data) + 1, sizeof(u64));
+               swap_sample_id_all(event, data);
+       }
 }
 
-static void perf_event__mmap_swap(union perf_event *event)
+static void perf_event__mmap_swap(union perf_event *event,
+                                 bool sample_id_all)
 {
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);
+
+       if (sample_id_all) {
+               void *data = &event->mmap.filename;
+
+               data += ALIGN(strlen(data) + 1, sizeof(u64));
+               swap_sample_id_all(event, data);
+       }
 }
 
-static void perf_event__task_swap(union perf_event *event)
+static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
 {
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->fork + 1);
 }
 
-static void perf_event__read_swap(union perf_event *event)
+static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
 {
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
@@ -479,6 +530,9 @@ static void perf_event__read_swap(union perf_event *event)
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->read + 1);
 }
 
 static u8 revbyte(u8 b)
@@ -530,7 +584,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
        swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
 }
 
-static void perf_event__hdr_attr_swap(union perf_event *event)
+static void perf_event__hdr_attr_swap(union perf_event *event,
+                                     bool sample_id_all __used)
 {
        size_t size;
 
@@ -541,18 +596,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event)
        mem_bswap_64(event->attr.id, size);
 }
 
-static void perf_event__event_type_swap(union perf_event *event)
+static void perf_event__event_type_swap(union perf_event *event,
+                                       bool sample_id_all __used)
 {
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
 }
 
-static void perf_event__tracing_data_swap(union perf_event *event)
+static void perf_event__tracing_data_swap(union perf_event *event,
+                                         bool sample_id_all __used)
 {
        event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
-typedef void (*perf_event__swap_op)(union perf_event *event);
+typedef void (*perf_event__swap_op)(union perf_event *event,
+                                   bool sample_id_all);
 
 static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
@@ -868,7 +926,7 @@ static struct machine *
                else
                        pid = event->ip.pid;
 
-               return perf_session__find_machine(session, pid);
+               return perf_session__findnew_machine(session, pid);
        }
 
        return perf_session__find_host_machine(session);
@@ -986,6 +1044,15 @@ static int perf_session__process_user_event(struct perf_session *session, union
        }
 }
 
+static void event_swap(union perf_event *event, bool sample_id_all)
+{
+       perf_event__swap_op swap;
+
+       swap = perf_event__swap_ops[event->header.type];
+       if (swap)
+               swap(event, sample_id_all);
+}
+
 static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
@@ -994,9 +1061,8 @@ static int perf_session__process_event(struct perf_session *session,
        struct perf_sample sample;
        int ret;
 
-       if (session->header.needs_swap &&
-           perf_event__swap_ops[event->header.type])
-               perf_event__swap_ops[event->header.type](event);
+       if (session->header.needs_swap)
+               event_swap(event, session->sample_id_all);
 
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
@@ -1428,7 +1494,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                          int print_sym, int print_dso, int print_symoffset)
 {
        struct addr_location al;
-       struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
        struct callchain_cursor_node *node;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample,
@@ -1446,10 +1511,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                                error("Failed to resolve callchain. Skipping\n");
                        return;
                }
-               callchain_cursor_commit(cursor);
+               callchain_cursor_commit(&callchain_cursor);
 
                while (1) {
-                       node = callchain_cursor_current(cursor);
+                       node = callchain_cursor_current(&callchain_cursor);
                        if (!node)
                                break;
 
@@ -1460,12 +1525,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                        }
                        if (print_dso) {
                                printf(" (");
-                               map__fprintf_dsoname(al.map, stdout);
+                               map__fprintf_dsoname(node->map, stdout);
                                printf(")");
                        }
                        printf("\n");
 
-                       callchain_cursor_advance(cursor);
+                       callchain_cursor_advance(&callchain_cursor);
                }
 
        } else {
index 7a5434c005653a4736cabdc2921ad907f74f2c4c..0c702e3f0a364272a9d979b04786403c4e079bbe 100644 (file)
@@ -80,6 +80,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
 void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
 void perf_event__attr_swap(struct perf_event_attr *attr);
 
 int perf_session__create_kernel_maps(struct perf_session *self);
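mem_bswap_32(), declared above with its definition in the session.c hunk earlier, walks a buffer and byte-swaps it in 32-bit units. A standalone copy with a small usage example; uint32_t stands in for perf's u32:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

static void mem_bswap_32(void *src, int byte_size)
{
        uint32_t *m = src;

        while (byte_size > 0) {
                *m = bswap_32(*m);
                byte_size -= sizeof(uint32_t);
                ++m;
        }
}

int main(void)
{
        uint32_t words[2] = { 0x11223344, 0xaabbccdd };

        mem_bswap_32(words, sizeof(words));
        printf("0x%08x 0x%08x\n", words[0], words[1]);  /* 0x44332211 0xddccbbaa */
        return 0;
}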
index e2ba8858f3e105b60eb0476b786c33d4aa36e863..3e2e5ea0f03f692c41f62e8aeb1f5d3382c86ce1 100644 (file)
@@ -323,6 +323,7 @@ struct dso *dso__new(const char *name)
                dso->sorted_by_name = 0;
                dso->has_build_id = 0;
                dso->kernel = DSO_TYPE_USER;
+               dso->needs_swap = DSO_SWAP__UNSET;
                INIT_LIST_HEAD(&dso->node);
        }
 
@@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
        return -1;
 }
 
+static int dso__swap_init(struct dso *dso, unsigned char eidata)
+{
+       static unsigned int const endian = 1;
+
+       dso->needs_swap = DSO_SWAP__NO;
+
+       switch (eidata) {
+       case ELFDATA2LSB:
+               /* We are big endian, DSO is little endian. */
+               if (*(unsigned char const *)&endian != 1)
+                       dso->needs_swap = DSO_SWAP__YES;
+               break;
+
+       case ELFDATA2MSB:
+               /* We are little endian, DSO is big endian. */
+               if (*(unsigned char const *)&endian != 0)
+                       dso->needs_swap = DSO_SWAP__YES;
+               break;
+
+       default:
+               pr_err("unrecognized DSO data encoding %d\n", eidata);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                         int fd, symbol_filter_t filter, int kmodule,
                         int want_symtab)
@@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                goto out_elf_end;
        }
 
+       if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+               goto out_elf_end;
+
        /* Always reject images with a mismatched build-id: */
        if (dso->has_build_id) {
                u8 build_id[BUILD_ID_SIZE];
@@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                if (opdsec && sym.st_shndx == opdidx) {
                        u32 offset = sym.st_value - opdshdr.sh_addr;
                        u64 *opd = opddata->d_buf + offset;
-                       sym.st_value = *opd;
+                       sym.st_value = DSO__SWAP(dso, u64, *opd);
                        sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
                }
 
@@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
 
 struct map *dso__new_map(const char *name)
 {
+       struct map *map = NULL;
        struct dso *dso = dso__new(name);
-       struct map *map = map__new2(0, dso, MAP__FUNCTION);
+
+       if (dso)
+               map = map__new2(0, dso, MAP__FUNCTION);
 
        return map;
 }
index 5649d63798cbfc0a24f3892d5a83b0d5809512ee..af0752b1aca1a9097b1eb552da416683bf3394c7 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <stdio.h>
+#include <byteswap.h>
 
 #ifdef HAVE_CPLUS_DEMANGLE
 extern char *cplus_demangle(const char *, int);
@@ -160,11 +161,18 @@ enum dso_kernel_type {
        DSO_TYPE_GUEST_KERNEL
 };
 
+enum dso_swap_type {
+       DSO_SWAP__UNSET,
+       DSO_SWAP__NO,
+       DSO_SWAP__YES,
+};
+
 struct dso {
        struct list_head node;
        struct rb_root   symbols[MAP__NR_TYPES];
        struct rb_root   symbol_names[MAP__NR_TYPES];
        enum dso_kernel_type    kernel;
+       enum dso_swap_type      needs_swap;
        u8               adjust_symbols:1;
        u8               has_build_id:1;
        u8               hit:1;
@@ -182,6 +190,28 @@ struct dso {
        char             name[0];
 };
 
+#define DSO__SWAP(dso, type, val)                      \
+({                                                     \
+       type ____r = val;                               \
+       BUG_ON(dso->needs_swap == DSO_SWAP__UNSET);     \
+       if (dso->needs_swap == DSO_SWAP__YES) {         \
+               switch (sizeof(____r)) {                \
+               case 2:                                 \
+                       ____r = bswap_16(val);          \
+                       break;                          \
+               case 4:                                 \
+                       ____r = bswap_32(val);          \
+                       break;                          \
+               case 8:                                 \
+                       ____r = bswap_64(val);          \
+                       break;                          \
+               default:                                \
+                       BUG_ON(1);                      \
+               }                                       \
+       }                                               \
+       ____r;                                          \
+})
+
 struct dso *dso__new(const char *name);
 void dso__delete(struct dso *dso);
 
index 84d9bd7820049cdcb641e0efc96f22a28b8f210a..9b5f856cc28096b865d1c34c12073a23e5eb5a9c 100644 (file)
@@ -188,28 +188,27 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
                nt = realloc(threads, (sizeof(*threads) +
                                       sizeof(pid_t) * total_tasks));
                if (nt == NULL)
-                       goto out_free_threads;
+                       goto out_free_namelist;
 
                threads = nt;
 
-               if (threads) {
-                       for (i = 0; i < items; i++)
-                               threads->map[j++] = atoi(namelist[i]->d_name);
-                       threads->nr = total_tasks;
-               }
-
-               for (i = 0; i < items; i++)
+               for (i = 0; i < items; i++) {
+                       threads->map[j++] = atoi(namelist[i]->d_name);
                        free(namelist[i]);
+               }
+               threads->nr = total_tasks;
                free(namelist);
-
-               if (!threads)
-                       break;
        }
 
 out:
        strlist__delete(slist);
        return threads;
 
+out_free_namelist:
+       for (i = 0; i < items; i++)
+               free(namelist[i]);
+       free(namelist);
+
 out_free_threads:
        free(threads);
        threads = NULL;
index df2fddbf0cd2f46d376d691786295526fa1deab0..5dd3b5ec8411191c41d76d19345496b3923f9a6f 100644 (file)
@@ -198,9 +198,8 @@ void print_trace_event(int cpu, void *data, int size)
        record.data = data;
 
        trace_seq_init(&s);
-       pevent_print_event(pevent, &s, &record);
+       pevent_event_info(&s, event, &record);
        trace_seq_do_printf(&s);
-       printf("\n");
 }
 
 void print_event(int cpu, void *data, int size, unsigned long long nsecs,
index ab2f682fd44c6cbc402b4a1ab8cbfd0fa9ce0f2e..16de7ad4850fa553ad72177415a9a56c71ebf3fe 100644 (file)
@@ -73,8 +73,8 @@ int backwards_count;
 char *progname;
 
 int num_cpus;
-cpu_set_t *cpu_mask;
-size_t cpu_mask_size;
+cpu_set_t *cpu_present_set, *cpu_mask;
+size_t cpu_present_setsize, cpu_mask_size;
 
 struct counters {
        unsigned long long tsc;         /* per thread */
@@ -103,6 +103,12 @@ struct timeval tv_even;
 struct timeval tv_odd;
 struct timeval tv_delta;
 
+int mark_cpu_present(int pkg, int core, int cpu)
+{
+       CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
+       return 0;
+}
+
 /*
  * cpu_mask_init(ncpus)
  *
@@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus)
        }
        cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
        CPU_ZERO_S(cpu_mask_size, cpu_mask);
+
+       /*
+        * Allocate and initialize cpu_present_set
+        */
+       cpu_present_set = CPU_ALLOC(ncpus);
+       if (cpu_present_set == NULL) {
+               perror("CPU_ALLOC");
+               exit(3);
+       }
+       cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
+       CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+       for_all_cpus(mark_cpu_present);
 }
 
 void cpu_mask_uninit()
@@ -125,6 +143,9 @@ void cpu_mask_uninit()
        CPU_FREE(cpu_mask);
        cpu_mask = NULL;
        cpu_mask_size = 0;
+       CPU_FREE(cpu_present_set);
+       cpu_present_set = NULL;
+       cpu_present_setsize = 0;
 }
 
 int cpu_migrate(int cpu)
@@ -912,6 +933,8 @@ int is_snb(unsigned int family, unsigned int model)
        switch (model) {
        case 0x2A:
        case 0x2D:
+       case 0x3A:      /* IVB */
+       case 0x3D:      /* IVB Xeon */
                return 1;
        }
        return 0;
@@ -1047,6 +1070,9 @@ int fork_it(char **argv)
        int retval;
        pid_t child_pid;
        get_counters(cnt_even);
+
+        /* clear affinity side-effect of get_counters() */
+        sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
        gettimeofday(&tv_even, (struct timezone *)NULL);
 
        child_pid = fork();
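The turbostat hunks above move to the dynamically sized CPU-set API so that the present-CPU mask can be rebuilt and used to undo the affinity pinning done by get_counters(). A short sketch of that glibc API (CPU_ALLOC, CPU_ALLOC_SIZE, CPU_ZERO_S, CPU_SET_S); the CPU count of 8 is invented here, turbostat discovers the real one:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int ncpus = 8;                          /* illustrative only */
        cpu_set_t *set = CPU_ALLOC(ncpus);
        size_t setsize = CPU_ALLOC_SIZE(ncpus);
        int cpu;

        if (set == NULL) {
                perror("CPU_ALLOC");
                exit(1);
        }
        CPU_ZERO_S(setsize, set);
        for (cpu = 0; cpu < ncpus; cpu++)
                CPU_SET_S(cpu, setsize, set);   /* mark every CPU as allowed */

        /* Undo any earlier pinning by allowing the full set again: */
        if (sched_setaffinity(0, setsize, set))
                perror("sched_setaffinity");

        CPU_FREE(set);
        return 0;
}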
index 28bc57ee757cf04d7b2166dc3e4b236b5fd19de6..a4162e15c25f89f32862a1f4fb2630c32f8c1c60 100644 (file)
@@ -1,4 +1,4 @@
-TARGETS = breakpoints vm
+TARGETS = breakpoints kcmp mqueue vm
 
 all:
        for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
new file mode 100644 (file)
index 0000000..dc79b86
--- /dev/null
@@ -0,0 +1,29 @@
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+        ARCH := X86
+       CFLAGS := -DCONFIG_X86_32 -D__i386__
+endif
+ifeq ($(ARCH),x86_64)
+       ARCH := X86
+       CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+endif
+
+CFLAGS += -I../../../../arch/x86/include/generated/
+CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../arch/x86/include/
+
+all:
+ifeq ($(ARCH),X86)
+       gcc $(CFLAGS) kcmp_test.c -o run_test
+else
+       echo "Not an x86 target, can't build kcmp selftest"
+endif
+
+run-tests: all
+       ./kcmp_test
+
+clean:
+       rm -fr ./run_test
+       rm -fr ./test-file
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
new file mode 100644 (file)
index 0000000..358cc6b
--- /dev/null
@@ -0,0 +1,94 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <linux/unistd.h>
+#include <linux/kcmp.h>
+
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
+{
+       return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
+}
+
+int main(int argc, char **argv)
+{
+       const char kpath[] = "kcmp-test-file";
+       int pid1, pid2;
+       int fd1, fd2;
+       int status;
+
+       fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
+       pid1 = getpid();
+
+       if (fd1 < 0) {
+               perror("Can't create file");
+               exit(1);
+       }
+
+       pid2 = fork();
+       if (pid2 < 0) {
+               perror("fork failed");
+               exit(1);
+       }
+
+       if (!pid2) {
+               int pid2 = getpid();
+               int ret;
+
+               fd2 = open(kpath, O_RDWR, 0644);
+               if (fd2 < 0) {
+                       perror("Can't open file");
+                       exit(1);
+               }
+
+               /* An example of output and arguments */
+               printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
+                      "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
+                      "INV: %2ld\n",
+                      pid1, pid2,
+                      sys_kcmp(pid1, pid2, KCMP_FILE,          fd1, fd2),
+                      sys_kcmp(pid1, pid2, KCMP_FILES,         0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_VM,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_FS,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_SIGHAND,       0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_IO,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_SYSVSEM,       0, 0),
+
+                       /* This one should fail */
+                      sys_kcmp(pid1, pid2, KCMP_TYPES + 1,     0, 0));
+
+               /* This one should return same fd */
+               ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
+               if (ret) {
+                       printf("FAIL: 0 expected but %d returned\n", ret);
+                       ret = -1;
+               } else
+                       printf("PASS: 0 returned as expected\n");
+
+               /* Compare with self */
+               ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
+               if (ret) {
+                       printf("FAIL: 0 expected but %d returned\n", ret);
+                       ret = -1;
+               } else
+                       printf("PASS: 0 returned as expected\n");
+
+               exit(ret);
+       }
+
+       waitpid(pid2, &status, 0);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
new file mode 100644 (file)
index 0000000..d8d4237
--- /dev/null
@@ -0,0 +1,2 @@
+mq_open_tests
+mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
new file mode 100644 (file)
index 0000000..54c0aad
--- /dev/null
@@ -0,0 +1,10 @@
+all:
+       gcc -O2 -lrt mq_open_tests.c -o mq_open_tests
+       gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
+
+run_tests:
+       ./mq_open_tests /test1
+       ./mq_perf_tests
+
+clean:
+       rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
new file mode 100644 (file)
index 0000000..711cc29
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *     Doug Ledford <dledford@redhat.com>
+ *
+ * mq_open_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_open_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_open_tests.c
+ *   Tests the various situations that should either succeed or fail to
+ *   open a posix message queue and then reports whether or not they
+ *   open a POSIX message queue and then reports whether or not they
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+
+static char *usage =
+"Usage:\n"
+"  %s path\n"
+"\n"
+"      path    Path name of the message queue to create\n"
+"\n"
+"      Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
+char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+int default_settings;
+struct rlimit saved_limits, cur_limits;
+int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
+int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
+FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
+char *queue_path;
+mqd_t queue = -1;
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+void validate_current_settings();
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+       static int in_shutdown = 0;
+
+       /* In case we get called recursively by a set() call below */
+       if (in_shutdown++)
+               return;
+
+       seteuid(0);
+
+       if (queue != -1)
+               if (mq_close(queue))
+                       perror("mq_close() during shutdown");
+       if (queue_path)
+               /*
+                * Be silent if this fails; if we already cleaned up, it is
+                * expected to fail.
+                */
+               mq_unlink(queue_path);
+       if (default_settings) {
+               if (saved_def_msgs)
+                       __set(def_msgs, saved_def_msgs,
+                             "failed to restore saved_def_msgs");
+               if (saved_def_msgsize)
+                       __set(def_msgsize, saved_def_msgsize,
+                             "failed to restore saved_def_msgsize");
+       }
+       if (saved_max_msgs)
+               __set(max_msgs, saved_max_msgs,
+                     "failed to restore saved_max_msgs");
+       if (saved_max_msgsize)
+               __set(max_msgsize, saved_max_msgsize,
+                     "failed to restore saved_max_msgsize");
+       if (exit_val)
+               error(exit_val, errno, "%s at %d", err_cause, line_no);
+       exit(0);
+}
+
+static inline int get(FILE *stream)
+{
+       int value;
+       rewind(stream);
+       if (fscanf(stream, "%d", &value) != 1)
+               shutdown(4, "Error reading /proc entry", __LINE__ - 1);
+       return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               return shutdown(5, "Failed writing to /proc file",
+                               __LINE__ - 1);
+       new_value = get(stream);
+       if (new_value != value)
+               return shutdown(5, "We didn't get what we wrote to /proc back",
+                               __LINE__ - 1);
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+       if (getrlimit(type, rlim))
+               shutdown(6, "getrlimit()", __LINE__ - 1);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+       if (setrlimit(type, rlim))
+               shutdown(7, "setrlimit()", __LINE__ - 1);
+}
+
+void validate_current_settings()
+{
+       int rlim_needed;
+
+       if (cur_limits.rlim_cur < 4096) {
+               printf("Current rlimit value for POSIX message queue bytes is "
+                      "unreasonably low,\nincreasing.\n\n");
+               cur_limits.rlim_cur = 8192;
+               cur_limits.rlim_max = 16384;
+               setr(RLIMIT_MSGQUEUE, &cur_limits);
+       }
+
+       if (default_settings) {
+               rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
+                                                   2 * sizeof(void *));
+               if (rlim_needed > cur_limits.rlim_cur) {
+                       printf("Temporarily lowering default queue parameters "
+                              "to something that will work\n"
+                              "with the current rlimit values.\n\n");
+                       set(def_msgs, 10);
+                       cur_def_msgs = 10;
+                       set(def_msgsize, 128);
+                       cur_def_msgsize = 128;
+               }
+       } else {
+               rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
+                                                   2 * sizeof(void *));
+               if (rlim_needed > cur_limits.rlim_cur) {
+                       printf("Temporarily lowering maximum queue parameters "
+                              "to something that will work\n"
+                              "with the current rlimit values in case this is "
+                              "a kernel that ties the default\n"
+                              "queue parameters to the maximum queue "
+                              "parameters.\n\n");
+                       set(max_msgs, 10);
+                       cur_max_msgs = 10;
+                       set(max_msgsize, 128);
+                       cur_max_msgsize = 128;
+               }
+       }
+}
+
+/*
+ * test_queue - Test opening a queue, shutdown if we fail.  This should
+ * only be called in situations that should never fail.  We clean up
+ * after ourselves and return the queue attributes in *result.
+ */
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT;
+       int perms = DEFFILEMODE;
+
+       if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+               shutdown(1, "mq_open()", __LINE__);
+       if (mq_getattr(queue, result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       if (mq_close(queue))
+               shutdown(1, "mq_close()", __LINE__);
+       queue = -1;
+       if (mq_unlink(queue_path))
+               shutdown(1, "mq_unlink()", __LINE__);
+}
+
+/*
+ * Same as test_queue above, but failure is not fatal.
+ * Returns:
+ * 0 - Failed to create a queue
+ * 1 - Created a queue, attributes in *result
+ */
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT;
+       int perms = DEFFILEMODE;
+
+       if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+               return 0;
+       if (mq_getattr(queue, result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       if (mq_close(queue))
+               shutdown(1, "mq_close()", __LINE__);
+       queue = -1;
+       if (mq_unlink(queue_path))
+               shutdown(1, "mq_unlink()", __LINE__);
+       return 1;
+}
+
+int main(int argc, char *argv[])
+{
+       struct mq_attr attr, result;
+
+       if (argc != 2) {
+               fprintf(stderr, "Must pass a valid queue name\n\n");
+               fprintf(stderr, usage, argv[0]);
+               exit(1);
+       }
+
+       /*
+        * Although we can create a msg queue with a non-absolute path name,
+        * unlink will fail.  So, if the name doesn't start with a /, add one
+        * when we save it.
+        */
+       if (*argv[1] == '/')
+               queue_path = strdup(argv[1]);
+       else {
+               queue_path = malloc(strlen(argv[1]) + 2);
+               if (!queue_path) {
+                       perror("malloc()");
+                       exit(1);
+               }
+               queue_path[0] = '/';
+               queue_path[1] = 0;
+               strcat(queue_path, argv[1]);
+       }
+
+       if (getuid() != 0) {
+               fprintf(stderr, "Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+               exit(1);
+       }
+
+       /* Find out what files there are for us to make tweaks in */
+       def_msgs = fopen(DEF_MSGS, "r+");
+       def_msgsize = fopen(DEF_MSGSIZE, "r+");
+       max_msgs = fopen(MAX_MSGS, "r+");
+       max_msgsize = fopen(MAX_MSGSIZE, "r+");
+
+       if (!max_msgs)
+               shutdown(2, "Failed to open msg_max", __LINE__);
+       if (!max_msgsize)
+               shutdown(2, "Failed to open msgsize_max", __LINE__);
+       if (def_msgs || def_msgsize)
+               default_settings = 1;
+
+       /* Load up the current system values for everything we can */
+       getr(RLIMIT_MSGQUEUE, &saved_limits);
+       cur_limits = saved_limits;
+       if (default_settings) {
+               saved_def_msgs = cur_def_msgs = get(def_msgs);
+               saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
+       }
+       saved_max_msgs = cur_max_msgs = get(max_msgs);
+       saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+
+       /* Tell the user our initial state */
+       printf("\nInitial system state:\n");
+       printf("\tUsing queue path:\t\t%s\n", queue_path);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
+       printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
+       if (default_settings) {
+               printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
+               printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
+       } else {
+               printf("\tDefault Message Size:\t\tNot Supported\n");
+               printf("\tDefault Queue Size:\t\tNot Supported\n");
+       }
+       printf("\n");
+
+       validate_current_settings();
+
+       printf("Adjusted system state for testing:\n");
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
+       printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
+       if (default_settings) {
+               printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
+               printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
+       }
+
+       printf("\n\nTest series 1, behavior when no attr struct "
+              "passed to mq_open:\n");
+       if (!default_settings) {
+               test_queue(NULL, &result);
+               printf("Given sane system settings, mq_open without an attr "
+                      "struct succeeds:\tPASS\n");
+               if (result.mq_maxmsg != cur_max_msgs ||
+                   result.mq_msgsize != cur_max_msgsize) {
+                       printf("Kernel does not support setting the default "
+                              "mq attributes,\nbut also doesn't tie the "
+                              "defaults to the maximums:\t\t\tPASS\n");
+               } else {
+                       set(max_msgs, ++cur_max_msgs);
+                       set(max_msgsize, ++cur_max_msgsize);
+                       test_queue(NULL, &result);
+                       if (result.mq_maxmsg == cur_max_msgs &&
+                           result.mq_msgsize == cur_max_msgsize)
+                               printf("Kernel does not support setting the "
+                                      "default mq attributes and\n"
+                                      "also ties system wide defaults to "
+                                      "the system wide maximums:\t\t"
+                                      "FAIL\n");
+                       else
+                               printf("Kernel does not support setting the "
+                                      "default mq attributes,\n"
+                                      "but also doesn't tie the defaults to "
+                                      "the maximums:\t\t\tPASS\n");
+               }
+       } else {
+               printf("Kernel supports setting defaults separately from "
+                      "maximums:\t\tPASS\n");
+               /*
+                * While we are here, go ahead and test that the kernel
+                * properly follows the default settings
+                */
+               test_queue(NULL, &result);
+               printf("Given sane values, mq_open without an attr struct "
+                      "succeeds:\t\tPASS\n");
+               if (result.mq_maxmsg != cur_def_msgs ||
+                   result.mq_msgsize != cur_def_msgsize)
+                       printf("Kernel supports setting defaults, but does "
+                              "not actually honor them:\tFAIL\n\n");
+               else {
+                       set(def_msgs, ++cur_def_msgs);
+                       set(def_msgsize, ++cur_def_msgsize);
+                       /* In case max was the same as the default */
+                       set(max_msgs, ++cur_max_msgs);
+                       set(max_msgsize, ++cur_max_msgsize);
+                       test_queue(NULL, &result);
+                       if (result.mq_maxmsg != cur_def_msgs ||
+                           result.mq_msgsize != cur_def_msgsize)
+                               printf("Kernel supports setting defaults, but "
+                                      "does not actually honor them:\t"
+                                      "FAIL\n");
+                       else
+                               printf("Kernel properly honors default setting "
+                                      "knobs:\t\t\t\tPASS\n");
+               }
+               set(def_msgs, cur_max_msgs + 1);
+               cur_def_msgs = cur_max_msgs + 1;
+               set(def_msgsize, cur_max_msgsize + 1);
+               cur_def_msgsize = cur_max_msgsize + 1;
+               if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
+                   cur_limits.rlim_cur) {
+                       cur_limits.rlim_cur = (cur_def_msgs + 2) *
+                               (cur_def_msgsize + 2 * sizeof(void *));
+                       cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
+                       setr(RLIMIT_MSGQUEUE, &cur_limits);
+               }
+               if (test_queue_fail(NULL, &result)) {
+                       if (result.mq_maxmsg == cur_max_msgs &&
+                           result.mq_msgsize == cur_max_msgsize)
+                               printf("Kernel properly limits default values "
+                                      "to lesser of default/max:\t\tPASS\n");
+                       else
+                               printf("Kernel does not properly set default "
+                                      "queue parameters when\ndefaults > "
+                                      "max:\t\t\t\t\t\t\t\tFAIL\n");
+               } else
+                       printf("Kernel fails to open mq because defaults are "
+                              "greater than maximums:\tFAIL\n");
+               set(def_msgs, --cur_def_msgs);
+               set(def_msgsize, --cur_def_msgsize);
+               cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
+                       cur_def_msgsize;
+               setr(RLIMIT_MSGQUEUE, &cur_limits);
+               if (test_queue_fail(NULL, &result))
+                       printf("Kernel creates queue even though defaults "
+                              "would exceed\nrlimit setting:"
+                              "\t\t\t\t\t\t\t\tFAIL\n");
+               else
+                       printf("Kernel properly fails to create queue when "
+                              "defaults would\nexceed rlimit:"
+                              "\t\t\t\t\t\t\t\tPASS\n");
+       }
+
+       /*
+        * Test #2 - open with an attr struct that exceeds rlimit
+        */
+       printf("\n\nTest series 2, behavior when attr struct is "
+              "passed to mq_open:\n");
+       cur_max_msgs = 32;
+       cur_max_msgsize = cur_limits.rlim_max >> 4;
+       set(max_msgs, cur_max_msgs);
+       set(max_msgsize, cur_max_msgsize);
+       attr.mq_maxmsg = cur_max_msgs;
+       attr.mq_msgsize = cur_max_msgsize;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open in excess of rlimit max when euid = 0 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open in excess of rlimit max when euid = 0 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = cur_max_msgs + 1;
+       attr.mq_msgsize = 10;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_maxmsg > limit when euid = 0 "
+                      "succeeded:\t\tPASS\n");
+       else
+               printf("Queue open with mq_maxmsg > limit when euid = 0 "
+                      "failed:\t\tFAIL\n");
+       attr.mq_maxmsg = 1;
+       attr.mq_msgsize = cur_max_msgsize + 1;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_msgsize > limit when euid = 0 "
+                      "succeeded:\t\tPASS\n");
+       else
+               printf("Queue open with mq_msgsize > limit when euid = 0 "
+                      "failed:\t\tFAIL\n");
+       attr.mq_maxmsg = 65536;
+       attr.mq_msgsize = 65536;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with total size > 2GB when euid = 0 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with total size > 2GB when euid = 0 "
+                      "failed:\t\t\tPASS\n");
+       seteuid(99);
+       attr.mq_maxmsg = cur_max_msgs;
+       attr.mq_msgsize = cur_max_msgsize;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open in excess of rlimit max when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open in excess of rlimit max when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = cur_max_msgs + 1;
+       attr.mq_msgsize = 10;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_maxmsg > limit when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with mq_maxmsg > limit when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = 1;
+       attr.mq_msgsize = cur_max_msgsize + 1;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_msgsize > limit when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with mq_msgsize > limit when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = 65536;
+       attr.mq_msgsize = 65536;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with total size > 2GB when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with total size > 2GB when euid = 99 "
+                      "failed:\t\t\tPASS\n");
+
+       shutdown(0,"",0);
+}
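
For reference, the behavior exercised by test series 1 above (mq_open() with a NULL attr pointer picking up the system defaults) can be observed with a few lines of userspace code. The sketch below is illustrative only and not part of the patch; the queue name "/attr_probe" is an arbitrary choice, and on older glibc the program must be linked with -lrt.

#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <mqueue.h>

int main(void)
{
	struct mq_attr result;
	/* NULL attr: the kernel chooses the queue attributes for us */
	mqd_t q = mq_open("/attr_probe", O_RDWR | O_CREAT | O_EXCL, 0600, NULL);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	/* Read back what we actually got, as test_queue() does above */
	if (mq_getattr(q, &result) == 0)
		printf("defaulted to mq_maxmsg=%ld mq_msgsize=%ld\n",
		       result.mq_maxmsg, result.mq_msgsize);
	mq_close(q);
	mq_unlink("/attr_probe");
	return 0;
}
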
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
new file mode 100644 (file)
index 0000000..2fadd4b
--- /dev/null
@@ -0,0 +1,741 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *     Doug Ledford <dledford@redhat.com>
+ *
+ * mq_perf_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_perf_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_perf_tests.c
+ *   Tests various types of message queue workloads, concentrating on those
+ *   situations that involve large message sizes, large message queue depths,
+ *   or both, and reports back useful metrics about kernel message queue
+ *   performance.
+ *
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+#include <popt.h>
+#include <error.h>
+
+static char *usage =
+"Usage:\n"
+"  %s [-c #[,#..] -f] path\n"
+"\n"
+"      -c #    Skip most tests and go straight to a high queue depth test\n"
+"              and then run that test continuously (useful for running at\n"
+"              the same time as some other workload to see how much the\n"
+"              cache thrashing caused by adding messages to a very deep\n"
+"              queue impacts the performance of other programs).  The number\n"
+"              indicates which CPU core we should bind the process to during\n"
+"              the run.  If you have more than one physical CPU, then you\n"
+"              will need one copy per physical CPU package, and you should\n"
+"              specify the CPU cores to pin ourself to via a comma separated\n"
+"              list of CPU values.\n"
+"      -f      Only usable with continuous mode.  Pin ourself to the CPUs\n"
+"              as requested, then instead of looping doing a high mq\n"
+"              workload, just busy loop.  This will allow us to lock up a\n"
+"              single CPU just like we normally would, but without actually\n"
+"              thrashing the CPU cache.  This is to make it easier to get\n"
+"              comparable numbers from some other workload running on the\n"
+"              other CPUs.  One set of numbers with # CPUs locked up running\n"
+"              an mq workload, and another set of numbers with those same\n"
+"              CPUs locked away from the test workload, but not doing\n"
+"              anything to trash the cache like the mq workload might.\n"
+"      path    Path name of the message queue to create\n"
+"\n"
+"      Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define MAX_CPUS 64
+char *cpu_option_string;
+int cpus_to_pin[MAX_CPUS];
+int num_cpus_to_pin;
+pthread_t cpu_threads[MAX_CPUS];
+pthread_t main_thread;
+cpu_set_t *cpu_set;
+int cpu_set_size;
+int cpus_online;
+
+#define MSG_SIZE 16
+#define TEST1_LOOPS 10000000
+#define TEST2_LOOPS 100000
+int continuous_mode;
+int continuous_mode_fake;
+
+struct rlimit saved_limits, cur_limits;
+int saved_max_msgs, saved_max_msgsize;
+int cur_max_msgs, cur_max_msgsize;
+FILE *max_msgs, *max_msgsize;
+int cur_nice;
+char *queue_path = "/mq_perf_tests";
+mqd_t queue = -1;
+struct mq_attr result;
+int mq_prio_max;
+
+const struct poptOption options[] = {
+       {
+               .longName = "continuous",
+               .shortName = 'c',
+               .argInfo = POPT_ARG_STRING,
+               .arg = &cpu_option_string,
+               .val = 'c',
+               .descrip = "Run continuous tests at a high queue depth in "
+                       "order to test the effects of cache thrashing on "
+                       "other tasks on the system.  This test is intended "
+                       "to be run on one core of each physical CPU while "
+                       "some other CPU intensive task is run on all the other "
+                       "cores of that same physical CPU and the other task "
+                       "is timed.  It is assumed that the process of adding "
+                       "messages to the message queue in a tight loop will "
+                       "impact that other task to some degree.  Once the "
+                       "tests are performed in this way, you should then "
+                       "re-run the tests using fake mode in order to check "
+                       "the difference in time required to perform the CPU "
+                       "intensive task",
+               .argDescrip = "cpu[,cpu]",
+       },
+       {
+               .longName = "fake",
+               .shortName = 'f',
+               .argInfo = POPT_ARG_NONE,
+               .arg = &continuous_mode_fake,
+               .val = 0,
+               .descrip = "Tie up the CPUs that we would normally tie up in"
+                       "continuous mode, but don't actually do any mq stuff, "
+                       "just keep the CPU busy so it can't be used to process "
+                       "system level tasks as this would free up resources on "
+                       "the other CPU cores and skew the comparison between "
+                       "the no-mqueue work and mqueue work tests",
+               .argDescrip = NULL,
+       },
+       {
+               .longName = "path",
+               .shortName = 'p',
+               .argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
+               .arg = &queue_path,
+               .val = 'p',
+               .descrip = "The name of the path to use in the mqueue "
+                       "filesystem for our tests",
+               .argDescrip = "pathname",
+       },
+       POPT_AUTOHELP
+       POPT_TABLEEND
+};
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
+void sig_action(int signum, siginfo_t *info, void *context);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline int try_set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+static inline void open_queue(struct mq_attr *attr);
+void increase_limits(void);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+       static int in_shutdown = 0;
+       int errno_at_shutdown = errno;
+       int i;
+
+       /* In case we get called by multiple threads or from an sighandler */
+       if (in_shutdown++)
+               return;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i]) {
+                       pthread_kill(cpu_threads[i], SIGUSR1);
+                       pthread_join(cpu_threads[i], NULL);
+               }
+
+       if (queue != -1)
+               if (mq_close(queue))
+                       perror("mq_close() during shutdown");
+       if (queue_path)
+               /*
+                * Be silent if this fails; if we already cleaned up, it's
+                * expected to fail
+                */
+               mq_unlink(queue_path);
+       if (saved_max_msgs)
+               __set(max_msgs, saved_max_msgs,
+                     "failed to restore saved_max_msgs");
+       if (saved_max_msgsize)
+               __set(max_msgsize, saved_max_msgsize,
+                     "failed to restore saved_max_msgsize");
+       if (exit_val)
+               error(exit_val, errno_at_shutdown, "%s at %d",
+                     err_cause, line_no);
+       exit(0);
+}
+
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
+{
+       if (pthread_self() != main_thread)
+               pthread_exit(0);
+       else {
+               fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
+                               "exiting\n", signum);
+               shutdown(0, "", 0);
+               fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+               exit(0);
+       }
+}
+
+void sig_action(int signum, siginfo_t *info, void *context)
+{
+       if (pthread_self() != main_thread)
+               pthread_kill(main_thread, signum);
+       else {
+               fprintf(stderr, "Caught signal %d, exiting\n", signum);
+               shutdown(0, "", 0);
+               fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+               exit(0);
+       }
+}
+
+static inline int get(FILE *stream)
+{
+       int value;
+       rewind(stream);
+       if (fscanf(stream, "%d", &value) != 1)
+               shutdown(4, "Error reading /proc entry", __LINE__);
+       return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               return shutdown(5, "Failed writing to /proc file", __LINE__);
+       new_value = get(stream);
+       if (new_value != value)
+               return shutdown(5, "We didn't get what we wrote to /proc back",
+                               __LINE__);
+}
+
+static inline int try_set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       fprintf(stream, "%d", value);
+       new_value = get(stream);
+       return new_value == value;
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+       if (getrlimit(type, rlim))
+               shutdown(6, "getrlimit()", __LINE__);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+       if (setrlimit(type, rlim))
+               shutdown(7, "setrlimit()", __LINE__);
+}
+
+/**
+ * open_queue - open the global queue for testing
+ * @attr - An attr struct specifying the desired queue traits
+ * @result - An attr struct that lists the actual traits the queue has
+ *
+ * This open is not allowed to fail; failure will result in an orderly
+ * shutdown of the program.  The global queue_path is used to select which
+ * queue to open, and the queue descriptor is saved in the global queue
+ * variable.
+ */
+static inline void open_queue(struct mq_attr *attr)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
+       int perms = DEFFILEMODE;
+
+       queue = mq_open(queue_path, flags, perms, attr);
+       if (queue == -1)
+               shutdown(1, "mq_open()", __LINE__);
+       if (mq_getattr(queue, &result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       printf("\n\tQueue %s created:\n", queue_path);
+       printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
+              "O_NONBLOCK" : "(null)");
+       printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg);
+       printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize);
+       printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs);
+}
+
+void *fake_cont_thread(void *arg)
+{
+       int i;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i] == pthread_self())
+                       break;
+       printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
+              cpus_to_pin[i]);
+       while (1)
+               ;
+}
+
+void *cont_thread(void *arg)
+{
+       char buff[MSG_SIZE];
+       int i;
+       unsigned int priority;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i] == pthread_self())
+                       break;
+       printf("\tStarted continuous mode thread %d on CPU %d\n", i,
+              cpus_to_pin[i]);
+       while (1) {
+               while (mq_send(queue, buff, sizeof(buff), 0) == 0)
+                       ;
+               mq_receive(queue, buff, sizeof(buff), &priority);
+       }
+}
+
+#define drain_queue() \
+       while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
+
+#define do_untimed_send() \
+       do { \
+               if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+                       shutdown(3, "Test send failure", __LINE__); \
+       } while (0)
+
+#define do_send_recv() \
+       do { \
+               clock_gettime(clock, &start); \
+               if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+                       shutdown(3, "Test send failure", __LINE__); \
+               clock_gettime(clock, &middle); \
+               if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
+                       shutdown(3, "Test receive failure", __LINE__); \
+               clock_gettime(clock, &end); \
+               nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
+                       (middle.tv_nsec - start.tv_nsec); \
+               send_total.tv_nsec += nsec; \
+               if (send_total.tv_nsec >= 1000000000) { \
+                       send_total.tv_sec++; \
+                       send_total.tv_nsec -= 1000000000; \
+               } \
+               nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
+                       (end.tv_nsec - middle.tv_nsec); \
+               recv_total.tv_nsec += nsec; \
+               if (recv_total.tv_nsec >= 1000000000) { \
+                       recv_total.tv_sec++; \
+                       recv_total.tv_nsec -= 1000000000; \
+               } \
+       } while (0)
+
+struct test {
+       char *desc;
+       void (*func)(int *);
+};
+
+void const_prio(int *prio)
+{
+       return;
+}
+
+void inc_prio(int *prio)
+{
+       if (++*prio == mq_prio_max)
+               *prio = 0;
+}
+
+void dec_prio(int *prio)
+{
+       if (--*prio < 0)
+               *prio = mq_prio_max - 1;
+}
+
+void random_prio(int *prio)
+{
+       *prio = random() % mq_prio_max;
+}
+
+struct test test2[] = {
+       {"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
+               const_prio},
+       {"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
+               inc_prio},
+       {"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
+               dec_prio},
+       {"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
+               random_prio},
+       {NULL, NULL}
+};
+
+/**
+ * Tests to perform (all done with MSG_SIZE messages):
+ *
+ * 1) Time to add/remove message with 0 messages on queue
+ * 1a) with constant prio
+ * 2) Time to add/remove message when queue close to capacity:
+ * 2a) with constant prio
+ * 2b) with increasing prio
+ * 2c) with decreasing prio
+ * 2d) with random prio
+ * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
+ */
+void *perf_test_thread(void *arg)
+{
+       char buff[MSG_SIZE];
+       int prio_out;
+       unsigned int prio_in;
+       int i;
+       clockid_t clock;
+       pthread_t *t;
+       struct timespec res, start, middle, end, send_total, recv_total;
+       unsigned long long nsec;
+       struct test *cur_test;
+
+       t = &cpu_threads[0];
+       printf("\n\tStarted mqueue performance test thread on CPU %d\n",
+              cpus_to_pin[0]);
+       mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
+       if (mq_prio_max == -1)
+               shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
+       if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
+               shutdown(2, "pthread_getcpuclockid", __LINE__);
+
+       if (clock_getres(clock, &res))
+               shutdown(2, "clock_getres()", __LINE__);
+
+       printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
+       printf("\t\tClock resolution:\t\t%d nsec%s\n", res.tv_nsec,
+              res.tv_nsec > 1 ? "s" : "");
+
+
+
+       printf("\n\tTest #1: Time send/recv message, queue empty\n");
+       printf("\t\t(%d iterations)\n", TEST1_LOOPS);
+       prio_out = 0;
+       send_total.tv_sec = 0;
+       send_total.tv_nsec = 0;
+       recv_total.tv_sec = 0;
+       recv_total.tv_nsec = 0;
+       for (i = 0; i < TEST1_LOOPS; i++)
+               do_send_recv();
+       printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+              send_total.tv_sec, send_total.tv_nsec);
+       nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+                send_total.tv_nsec) / TEST1_LOOPS;
+       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+       printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+              recv_total.tv_sec, recv_total.tv_nsec);
+       nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+               recv_total.tv_nsec) / TEST1_LOOPS;
+       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+
+
+       for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
+               printf("%s", cur_test->desc);
+               printf("\t\t(%d iterations)\n", TEST2_LOOPS);
+               prio_out = 0;
+               send_total.tv_sec = 0;
+               send_total.tv_nsec = 0;
+               recv_total.tv_sec = 0;
+               recv_total.tv_nsec = 0;
+               printf("\t\tFilling queue...");
+               fflush(stdout);
+               clock_gettime(clock, &start);
+               for (i = 0; i < result.mq_maxmsg - 1; i++) {
+                       do_untimed_send();
+                       cur_test->func(&prio_out);
+               }
+               clock_gettime(clock, &end);
+               nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+                       1000000000) + (end.tv_nsec - start.tv_nsec);
+               printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+                      nsec % 1000000000);
+               printf("\t\tTesting...");
+               fflush(stdout);
+               for (i = 0; i < TEST2_LOOPS; i++) {
+                       do_send_recv();
+                       cur_test->func(&prio_out);
+               }
+               printf("done.\n");
+               printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+                      send_total.tv_sec, send_total.tv_nsec);
+               nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+                        send_total.tv_nsec) / TEST2_LOOPS;
+               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+               printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+                      recv_total.tv_sec, recv_total.tv_nsec);
+               nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+                       recv_total.tv_nsec) / TEST2_LOOPS;
+               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+               printf("\t\tDraining queue...");
+               fflush(stdout);
+               clock_gettime(clock, &start);
+               drain_queue();
+               clock_gettime(clock, &end);
+               nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+                       1000000000) + (end.tv_nsec - start.tv_nsec);
+               printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+                      nsec % 1000000000);
+       }
+       return 0;
+}
+
+void increase_limits(void)
+{
+       cur_limits.rlim_cur = RLIM_INFINITY;
+       cur_limits.rlim_max = RLIM_INFINITY;
+       setr(RLIMIT_MSGQUEUE, &cur_limits);
+       while (try_set(max_msgs, cur_max_msgs += 10))
+               ;
+       cur_max_msgs = get(max_msgs);
+       while (try_set(max_msgsize, cur_max_msgsize += 1024))
+               ;
+       cur_max_msgsize = get(max_msgsize);
+       if (setpriority(PRIO_PROCESS, 0, -20) != 0)
+               shutdown(2, "setpriority()", __LINE__);
+       cur_nice = -20;
+}
+
+int main(int argc, char *argv[])
+{
+       struct mq_attr attr;
+       char *option, *next_option;
+       int i, cpu;
+       struct sigaction sa;
+       poptContext popt_context;
+       int rc;
+       void *retval;
+
+       main_thread = pthread_self();
+       num_cpus_to_pin = 0;
+
+       if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
+               perror("sysconf(_SC_NPROCESSORS_ONLN)");
+               exit(1);
+       }
+       cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
+       cpu_set = CPU_ALLOC(cpus_online);
+       if (cpu_set == NULL) {
+               perror("CPU_ALLOC()");
+               exit(1);
+       }
+       cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
+       CPU_ZERO_S(cpu_set_size, cpu_set);
+
+       popt_context = poptGetContext(NULL, argc, (const char **)argv,
+                                     options, 0);
+
+       while ((rc = poptGetNextOpt(popt_context)) > 0) {
+               switch (rc) {
+               case 'c':
+                       continuous_mode = 1;
+                       option = cpu_option_string;
+                       do {
+                               next_option = strchr(option, ',');
+                               if (next_option)
+                                       *next_option = '\0';
+                               cpu = atoi(option);
+                               if (cpu >= cpus_online)
+                                       fprintf(stderr, "CPU %d exceeds "
+                                               "cpus online, ignoring.\n",
+                                               cpu);
+                               else
+                                       cpus_to_pin[num_cpus_to_pin++] = cpu;
+                               if (next_option)
+                                       option = ++next_option;
+                       } while (next_option && num_cpus_to_pin < MAX_CPUS);
+                       /* Double check that they didn't give us the same CPU
+                        * more than once */
+                       for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
+                               if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
+                                               cpu_set)) {
+                                       fprintf(stderr, "Any given CPU may "
+                                               "only be given once.\n");
+                                       exit(1);
+                               } else
+                                       CPU_SET_S(cpus_to_pin[cpu],
+                                                 cpu_set_size, cpu_set);
+                       }
+                       break;
+               case 'p':
+                       /*
+                        * Although we can create a msg queue with a
+                        * non-absolute path name, unlink will fail.  So,
+                        * if the name doesn't start with a /, add one
+                        * when we save it.
+                        */
+                       option = queue_path;
+                       if (*option != '/') {
+                               queue_path = malloc(strlen(option) + 2);
+                               if (!queue_path) {
+                                       perror("malloc()");
+                                       exit(1);
+                               }
+                               queue_path[0] = '/';
+                               queue_path[1] = 0;
+                               strcat(queue_path, option);
+                               free(option);
+                       }
+                       break;
+               }
+       }
+
+       if (continuous_mode && num_cpus_to_pin == 0) {
+               fprintf(stderr, "Must pass at least one CPU to continuous "
+                       "mode.\n");
+               poptPrintUsage(popt_context, stderr, 0);
+               exit(1);
+       } else if (!continuous_mode) {
+               num_cpus_to_pin = 1;
+               cpus_to_pin[0] = cpus_online - 1;
+       }
+
+       if (getuid() != 0) {
+               fprintf(stderr, "Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+               exit(1);
+       }
+
+       max_msgs = fopen(MAX_MSGS, "r+");
+       max_msgsize = fopen(MAX_MSGSIZE, "r+");
+       if (!max_msgs)
+               shutdown(2, "Failed to open msg_max", __LINE__);
+       if (!max_msgsize)
+               shutdown(2, "Failed to open msgsize_max", __LINE__);
+
+       /* Load up the current system values for everything we can */
+       getr(RLIMIT_MSGQUEUE, &saved_limits);
+       cur_limits = saved_limits;
+       saved_max_msgs = cur_max_msgs = get(max_msgs);
+       saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+       errno = 0;
+       cur_nice = getpriority(PRIO_PROCESS, 0);
+       if (errno)
+               shutdown(2, "getpriority()", __LINE__);
+
+       /* Tell the user our initial state */
+       printf("\nInitial system state:\n");
+       printf("\tUsing queue path:\t\t\t%s\n", queue_path);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", saved_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", saved_limits.rlim_max);
+       printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
+       printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+       printf("\n");
+
+       increase_limits();
+
+       printf("Adjusted system state for testing:\n");
+       if (cur_limits.rlim_cur == RLIM_INFINITY) {
+               printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
+               printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
+       } else {
+               printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n",
+                      cur_limits.rlim_cur);
+               printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n",
+                      cur_limits.rlim_max);
+       }
+       printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
+       printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+       printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
+              (continuous_mode_fake ? "fake mode" : "enabled") :
+              "disabled");
+       printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
+       for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
+                       printf(",%d", cpus_to_pin[cpu]);
+       printf("\n");
+
+       sa.sa_sigaction = sig_action_SIGUSR1;
+       sigemptyset(&sa.sa_mask);
+       sigaddset(&sa.sa_mask, SIGHUP);
+       sigaddset(&sa.sa_mask, SIGINT);
+       sigaddset(&sa.sa_mask, SIGQUIT);
+       sigaddset(&sa.sa_mask, SIGTERM);
+       sa.sa_flags = SA_SIGINFO;
+       if (sigaction(SIGUSR1, &sa, NULL) == -1)
+               shutdown(1, "sigaction(SIGUSR1)", __LINE__);
+       sa.sa_sigaction = sig_action;
+       if (sigaction(SIGHUP, &sa, NULL) == -1)
+               shutdown(1, "sigaction(SIGHUP)", __LINE__);
+       if (sigaction(SIGINT, &sa, NULL) == -1)
+               shutdown(1, "sigaction(SIGINT)", __LINE__);
+       if (sigaction(SIGQUIT, &sa, NULL) == -1)
+               shutdown(1, "sigaction(SIGQUIT)", __LINE__);
+       if (sigaction(SIGTERM, &sa, NULL) == -1)
+               shutdown(1, "sigaction(SIGTERM)", __LINE__);
+
+       if (!continuous_mode_fake) {
+               attr.mq_flags = O_NONBLOCK;
+               attr.mq_maxmsg = cur_max_msgs;
+               attr.mq_msgsize = MSG_SIZE;
+               open_queue(&attr);
+       }
+       for (i = 0; i < num_cpus_to_pin; i++) {
+               pthread_attr_t thread_attr;
+               void *thread_func;
+
+               if (continuous_mode_fake)
+                       thread_func = &fake_cont_thread;
+               else if (continuous_mode)
+                       thread_func = &cont_thread;
+               else
+                       thread_func = &perf_test_thread;
+
+               CPU_ZERO_S(cpu_set_size, cpu_set);
+               CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
+               pthread_attr_init(&thread_attr);
+               pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
+                                           cpu_set);
+               if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
+                                  NULL))
+                       shutdown(1, "pthread_create()", __LINE__);
+               pthread_attr_destroy(&thread_attr);
+       }
+
+       if (!continuous_mode) {
+               pthread_join(cpu_threads[0], &retval);
+               shutdown((long)retval, "perf_test_thread()", __LINE__);
+       } else {
+               while (1)
+                       sleep(1);
+       }
+       shutdown(0, "", 0);
+}
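
For reference, the sketch below shows the measurement pattern behind the do_send_recv() macro above: a single mq_send()/mq_receive() pair timed against the calling thread's CPU-time clock obtained via pthread_getcpuclockid(). It is illustrative only and not part of the patch; the queue name "/timing_probe", the 16-byte message, and the single iteration are simplifying assumptions, and the program links with -lrt -lpthread.

#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <pthread.h>
#include <sys/stat.h>
#include <mqueue.h>

int main(void)
{
	char buff[16] = "ping";
	unsigned int prio = 0;
	struct timespec start, middle, end;
	clockid_t clock;
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = sizeof(buff) };
	mqd_t q = mq_open("/timing_probe", O_RDWR | O_CREAT | O_EXCL, 0600, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	/* Time against this thread's CPU clock, as perf_test_thread() does */
	if (pthread_getcpuclockid(pthread_self(), &clock)) {
		fprintf(stderr, "pthread_getcpuclockid failed\n");
		return 1;
	}
	clock_gettime(clock, &start);
	mq_send(q, buff, sizeof(buff), prio);
	clock_gettime(clock, &middle);
	mq_receive(q, buff, sizeof(buff), &prio);
	clock_gettime(clock, &end);
	printf("send: %ld nsec, recv: %ld nsec\n",
	       (middle.tv_sec - start.tv_sec) * 1000000000L +
	       (middle.tv_nsec - start.tv_nsec),
	       (end.tv_sec - middle.tv_sec) * 1000000000L +
	       (end.tv_nsec - middle.tv_nsec));
	mq_close(q);
	mq_unlink("/timing_probe");
	return 0;
}
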
index 65b845bd4e3e792ca09ce01aa31da92ced51a302..085872bb2bb593502024d020316d2978c2cf9031 100644 (file)
@@ -134,7 +134,7 @@ config INITRAMFS_COMPRESSION_BZIP2
        depends on RD_BZIP2
        help
          Its compression ratio and speed is intermediate.
-         Decompression speed is slowest among the four.  The initramfs
+         Decompression speed is slowest among the choices.  The initramfs
          size is about 10% smaller with bzip2, in comparison to gzip.
          Bzip2 uses a large amount of memory. For modern kernels you
          will need at least 8MB RAM or more for booting.
@@ -143,9 +143,9 @@ config INITRAMFS_COMPRESSION_LZMA
        bool "LZMA"
        depends on RD_LZMA
        help
-         The most recent compression algorithm.
-         Its ratio is best, decompression speed is between the other
-         three. Compression is slowest. The initramfs size is about 33%
+         This algorithm's compression ratio is best.
+         Decompression speed is between the other choices.
+         Compression is slowest. The initramfs size is about 33%
          smaller with LZMA in comparison to gzip.
 
 config INITRAMFS_COMPRESSION_XZ
@@ -161,7 +161,7 @@ config INITRAMFS_COMPRESSION_LZO
        bool "LZO"
        depends on RD_LZO
        help
-         Its compression ratio is the poorest among the four. The kernel
+         Its compression ratio is the poorest among the choices. The kernel
          size is about 10% bigger than gzip; however its speed
          (both compression and decompression) is the fastest.
 
index 01f572c10c71c1ba2fb5775320600b61829e73c6..23a41a9f8db999f4b70fd4c0cd4b6ae8b26ee3c2 100644 (file)
@@ -334,6 +334,11 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 }
 
 #ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
 {
@@ -346,7 +351,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
        }
 
        dev->host_irq = dev->dev->irq;
-       if (request_threaded_irq(dev->host_irq, NULL,
+       if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
                                 kvm_assigned_dev_thread_msi, 0,
                                 dev->irq_name, dev)) {
                pci_disable_msi(dev->dev);
@@ -358,6 +363,11 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 #endif
 
 #ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
 {
@@ -374,7 +384,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
-                                        NULL, kvm_assigned_dev_thread_msix,
+                                        kvm_assigned_dev_msix,
+                                        kvm_assigned_dev_thread_msix,
                                         0, dev->irq_name, dev);
                if (r)
                        goto err;
@@ -635,7 +646,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
-       u8 header_type;
 
        if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
                return -EINVAL;
@@ -668,8 +678,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        }
 
        /* Don't allow bridges to be assigned */
-       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+       if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
                r = -EPERM;
                goto out_put;
        }
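
For reference, the hunks above replace a NULL primary handler with trivial kvm_assigned_dev_msi()/kvm_assigned_dev_msix() handlers that simply return IRQ_WAKE_THREAD. The module sketch below shows that general request_threaded_irq() pattern in isolation; it is illustrative only and not part of the patch, and the demo_irq module parameter, the names, and the minimal error handling are assumptions made for brevity.

#include <linux/module.h>
#include <linux/interrupt.h>

static int demo_irq = -1;
module_param(demo_irq, int, 0444);

static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
	/* Runs in hard irq context; defer the real work to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_handler(int irq, void *dev_id)
{
	/* Runs in process context and may sleep */
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	if (demo_irq < 0)
		return -EINVAL;
	return request_threaded_irq(demo_irq, demo_hard_handler,
				    demo_thread_handler, 0, "demo", NULL);
}

static void __exit demo_exit(void)
{
	free_irq(demo_irq, NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
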
index f59c1e8de7a2e62b5977d90ab992e1c2240e80f5..7d7e2aaffece234a81cef3f4181cc7ffe5bb14cb 100644 (file)
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 }
 
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                return -ENOMEM;
 
        irqfd->kvm = kvm;
-       irqfd->gsi = gsi;
+       irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-       file = eventfd_fget(fd);
+       file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;
 
-       eventfd = eventfd_ctx_fdget(fd);
+       eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);
 
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+               if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }
 
 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-               return kvm_irqfd_deassign(kvm, fd, gsi);
+       if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+               return -EINVAL;
+
+       if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+               return kvm_irqfd_deassign(kvm, args);
 
-       return kvm_irqfd_assign(kvm, fd, gsi);
+       return kvm_irqfd_assign(kvm, args);
 }
 
 /*
index a6a0365475edafc1935e46c04297931eadb8663c..5afb43114020a82e0f3275d94e78d0d8fcba470c 100644 (file)
@@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
         */
        hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
+                   ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;
 
index 7e140683ff14d503a9714058cadd9dde7e4ffaf9..44ee7124b16dae1820ca1ca1c79f2a099979da12 100644 (file)
@@ -2047,7 +2047,7 @@ static long kvm_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
-               r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+               r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
@@ -2845,6 +2845,7 @@ void kvm_exit(void)
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
+       __free_page(fault_page);
        __free_page(hwpoison_page);
        __free_page(bad_page);
 }